/*-
 * Copyright (c) 2009 Hudson River Trading LLC
 * Written by: John H. Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for x86 machine check architecture.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/x86/x86/mca.c 314385 2017-02-28 11:41:57Z avg $");

#ifdef __amd64__
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <machine/intr_machdep.h>
#include <x86/apicvar.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

/* Modes for mca_scan() */
enum scan_mode {
	POLLED,			/* Periodic or sysctl-forced scan. */
	MCE,			/* Machine check exception handler. */
	CMCI,			/* Corrected machine check interrupt handler. */
};

#ifdef DEV_APIC
/*
 * State maintained for each monitored MCx bank to control the
 * corrected machine check interrupt threshold.
 */
struct cmc_state {
	int	max_threshold;	/* Largest threshold this bank supports. */
	time_t	last_intr;	/* time_uptime of the last CMCI. */
};
#endif

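/*
 * An MCA record plus list bookkeeping: 'logged' marks records that
 * have already been reported on the console, and 'link' threads the
 * record onto either mca_records or mca_freelist.
 */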
struct mca_internal {
	struct mca_record rec;
	int		logged;
	STAILQ_ENTRY(mca_internal) link;
};

static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");

static volatile int mca_count;	/* Number of records stored. */
static int mca_banks;		/* Number of per-CPU register banks. */

static SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL,
    "Machine Check Architecture");

static int mca_enabled = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
    "Administrative toggle for machine check support");

static int amd10h_L1TP = 1;
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
    "Administrative toggle for logging of level one TLB parity (L1TP) errors");

static int intel6h_HSD131;
SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
    "Administrative toggle for logging of spurious corrected errors");

int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
    &workaround_erratum383, 0,
    "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");

static STAILQ_HEAD(, mca_internal) mca_freelist;
static int mca_freecount;
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600;	/* Check hourly by default. */
static struct taskqueue *mca_tq;
static struct task mca_refill_task, mca_scan_task;
static struct mtx mca_lock;

#ifdef DEV_APIC
static struct cmc_state **cmc_state;	/* Indexed by cpuid, bank */
static int cmc_throttle = 60;	/* Time in seconds to throttle CMCI. */
#endif

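/*
 * A sysctl(9) handler that accepts only strictly positive integer
 * values; used for the "interval" and "cmc_throttle" tunables below.
 */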
static int
sysctl_positive_int(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value <= 0)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

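/*
 * Copy the record at the requested index out of the mca_records list;
 * the index arrives as the final name component of the hw.mca.records
 * sysctl.
 */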
static int
sysctl_mca_records(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct mca_record record;
	struct mca_internal *rec;
	int i;

	if (namelen != 1)
		return (EINVAL);

	if (name[0] < 0 || name[0] >= mca_count)
		return (EINVAL);

	mtx_lock_spin(&mca_lock);
	if (name[0] >= mca_count) {
		mtx_unlock_spin(&mca_lock);
		return (EINVAL);
	}
	i = 0;
	STAILQ_FOREACH(rec, &mca_records, link) {
		if (i == name[0]) {
			record = rec->rec;
			break;
		}
		i++;
	}
	mtx_unlock_spin(&mca_lock);
	return (SYSCTL_OUT(req, &record, sizeof(record)));
}

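/*
 * The next four functions decode subfields of a compound MCA error
 * code (transaction type, memory hierarchy level, request type and
 * memory-controller transaction type) into short mnemonics for
 * mca_log().
 */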
static const char *
mca_error_ttype(uint16_t mca_error)
{

	switch ((mca_error & 0x000c) >> 2) {
	case 0:
		return ("I");
	case 1:
		return ("D");
	case 2:
		return ("G");
	}
	return ("?");
}

static const char *
mca_error_level(uint16_t mca_error)
{

	switch (mca_error & 0x0003) {
	case 0:
		return ("L0");
	case 1:
		return ("L1");
	case 2:
		return ("L2");
	case 3:
		return ("LG");
	}
	return ("L?");
}

static const char *
mca_error_request(uint16_t mca_error)
{

	switch ((mca_error & 0x00f0) >> 4) {
	case 0x0:
		return ("ERR");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("DRD");
	case 0x4:
		return ("DWR");
	case 0x5:
		return ("IRD");
	case 0x6:
		return ("PREFETCH");
	case 0x7:
		return ("EVICT");
	case 0x8:
		return ("SNOOP");
	}
	return ("???");
}

static const char *
mca_error_mmtype(uint16_t mca_error)
{

	switch ((mca_error & 0x70) >> 4) {
	case 0x0:
		return ("GEN");
	case 0x1:
		return ("RD");
	case 0x2:
		return ("WR");
	case 0x3:
		return ("AC");
	case 0x4:
		return ("MS");
	}
	return ("???");
}

static int
mca_mute(const struct mca_record *rec)
{

	/*
	 * Skip spurious corrected parity errors generated by Intel Haswell-
	 * and Broadwell-based CPUs (see the HSD131, HSM142, HSW131 and BDM48
	 * errata, respectively), unless reporting is enabled.
	 * Note that these errors also have been observed with the D0-stepping
	 * of Haswell, while at least initially the CPU specification updates
	 * suggested only the C0-stepping to be affected.  Similarly, the
	 * Celeron 2955U with a CPU ID of 0x45 apparently is also affected by
	 * the same problem, with HSM142 only referring to 0x3c and 0x46.
	 */
	if (cpu_vendor_id == CPU_VENDOR_INTEL &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    (CPUID_TO_MODEL(cpu_id) == 0x3c ||	/* HSD131, HSM142, HSW131 */
	    CPUID_TO_MODEL(cpu_id) == 0x3d ||	/* BDM48 */
	    CPUID_TO_MODEL(cpu_id) == 0x45 ||
	    CPUID_TO_MODEL(cpu_id) == 0x46) &&	/* HSM142 */
	    rec->mr_bank == 0 &&
	    (rec->mr_status & 0xa0000000ffffffff) == 0x80000000000f0005 &&
	    !intel6h_HSD131)
		return (1);

	return (0);
}

/* Dump details about a single machine check. */
static void
mca_log(const struct mca_record *rec)
{
	uint16_t mca_error;

	if (mca_mute(rec))
		return;

	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
	    (long long)rec->mr_status);
	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
	    (long long)rec->mr_mcg_cap, (long long)rec->mr_mcg_status);
	printf("MCA: Vendor \"%s\", ID 0x%x, APIC ID %d\n", cpu_vendor,
	    rec->mr_cpu_id, rec->mr_apic_id);
	printf("MCA: CPU %d ", rec->mr_cpu);
	if (rec->mr_status & MC_STATUS_UC)
		printf("UNCOR ");
	else {
		printf("COR ");
		if (rec->mr_mcg_cap & MCG_CAP_CMCI_P)
			printf("(%lld) ", ((long long)rec->mr_status &
			    MC_STATUS_COR_COUNT) >> 38);
	}
	if (rec->mr_status & MC_STATUS_PCC)
		printf("PCC ");
	if (rec->mr_status & MC_STATUS_OVER)
		printf("OVER ");
	mca_error = rec->mr_status & MC_STATUS_MCA_ERROR;
	switch (mca_error) {
		/* Simple error codes. */
	case 0x0000:
		printf("no error");
		break;
	case 0x0001:
		printf("unclassified error");
		break;
	case 0x0002:
		printf("ucode ROM parity error");
		break;
	case 0x0003:
		printf("external error");
		break;
	case 0x0004:
		printf("FRC error");
		break;
	case 0x0005:
		printf("internal parity error");
		break;
	case 0x0400:
		printf("internal timer error");
		break;
	default:
		if ((mca_error & 0xfc00) == 0x0400) {
			printf("internal error %x", mca_error & 0x03ff);
			break;
		}

		/* Compound error codes. */

		/* Memory hierarchy error. */
		if ((mca_error & 0xeffc) == 0x000c) {
			printf("%s memory error", mca_error_level(mca_error));
			break;
		}

		/* TLB error. */
		if ((mca_error & 0xeff0) == 0x0010) {
			printf("%sTLB %s error", mca_error_ttype(mca_error),
			    mca_error_level(mca_error));
			break;
		}

		/* Memory controller error. */
		if ((mca_error & 0xef80) == 0x0080) {
			printf("%s channel ", mca_error_mmtype(mca_error));
			if ((mca_error & 0x000f) != 0x000f)
				printf("%d", mca_error & 0x000f);
			else
				printf("??");
			printf(" memory error");
			break;
		}

		/* Cache error. */
		if ((mca_error & 0xef00) == 0x0100) {
			printf("%sCACHE %s %s error",
			    mca_error_ttype(mca_error),
			    mca_error_level(mca_error),
			    mca_error_request(mca_error));
			break;
		}

		/* Bus and/or Interconnect error. */
		if ((mca_error & 0xe800) == 0x0800) {
			printf("BUS%s ", mca_error_level(mca_error));
			switch ((mca_error & 0x0600) >> 9) {
			case 0:
				printf("Source");
				break;
			case 1:
				printf("Responder");
				break;
			case 2:
				printf("Observer");
				break;
			default:
				printf("???");
				break;
			}
			printf(" %s ", mca_error_request(mca_error));
			switch ((mca_error & 0x000c) >> 2) {
			case 0:
				printf("Memory");
				break;
			case 2:
				printf("I/O");
				break;
			case 3:
				printf("Other");
				break;
			default:
				printf("???");
				break;
			}
			if (mca_error & 0x0100)
				printf(" timed out");
			break;
		}

		printf("unknown error %x", mca_error);
		break;
	}
	printf("\n");
	if (rec->mr_status & MC_STATUS_ADDRV)
		printf("MCA: Address 0x%llx\n", (long long)rec->mr_addr);
	if (rec->mr_status & MC_STATUS_MISCV)
		printf("MCA: Misc 0x%llx\n", (long long)rec->mr_misc);
}

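/*
 * Read the status of a single machine check bank.  If the bank holds
 * a valid event, capture the relevant MSRs in *rec, clear the bank
 * (corrected errors only, so the BIOS still sees fatal ones) and
 * return non-zero.
 */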
static int
mca_check_status(int bank, struct mca_record *rec)
{
	uint64_t status;
	u_int p[4];

	status = rdmsr(MSR_MC_STATUS(bank));
	if (!(status & MC_STATUS_VAL))
		return (0);

	/* Save exception information. */
	rec->mr_status = status;
	rec->mr_bank = bank;
	rec->mr_addr = 0;
	if (status & MC_STATUS_ADDRV)
		rec->mr_addr = rdmsr(MSR_MC_ADDR(bank));
	rec->mr_misc = 0;
	if (status & MC_STATUS_MISCV)
		rec->mr_misc = rdmsr(MSR_MC_MISC(bank));
	rec->mr_tsc = rdtsc();
	rec->mr_apic_id = PCPU_GET(apic_id);
	rec->mr_mcg_cap = rdmsr(MSR_MCG_CAP);
	rec->mr_mcg_status = rdmsr(MSR_MCG_STATUS);
	rec->mr_cpu_id = cpu_id;
	rec->mr_cpu_vendor_id = cpu_vendor_id;
	rec->mr_cpu = PCPU_GET(cpuid);

	/*
	 * Clear machine check.  Don't do this for uncorrectable
	 * errors so that the BIOS can see them.
	 */
	if (!(rec->mr_status & (MC_STATUS_PCC | MC_STATUS_UC))) {
		wrmsr(MSR_MC_STATUS(bank), 0);
		do_cpuid(0, p);
	}
	return (1);
}

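/*
 * Top up the preallocated freelist so that records can be claimed
 * from contexts, such as interrupt handlers, where malloc() must not
 * be called.
 */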
static void
mca_fill_freelist(void)
{
	struct mca_internal *rec;
	int desired;

	/*
	 * Ensure we have at least one record for each bank and one
	 * record per CPU.
	 */
	desired = imax(mp_ncpus, mca_banks);
	mtx_lock_spin(&mca_lock);
	while (mca_freecount < desired) {
		mtx_unlock_spin(&mca_lock);
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
		STAILQ_INSERT_TAIL(&mca_freelist, rec, link);
		mca_freecount++;
	}
	mtx_unlock_spin(&mca_lock);
}

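/* Task handler: replenish the freelist from a sleepable context. */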
static void
mca_refill(void *context, int pending)
{

	mca_fill_freelist();
}

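/*
 * Append a record to the mca_records list.  When polling it is safe
 * to sleep, so allocate directly; exception and interrupt context
 * must draw from the preallocated freelist instead.
 */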
static void
mca_record_entry(enum scan_mode mode, const struct mca_record *record)
{
	struct mca_internal *rec;

	if (mode == POLLED) {
		rec = malloc(sizeof(*rec), M_MCA, M_WAITOK);
		mtx_lock_spin(&mca_lock);
	} else {
		mtx_lock_spin(&mca_lock);
		rec = STAILQ_FIRST(&mca_freelist);
		if (rec == NULL) {
			printf("MCA: Unable to allocate space for an event.\n");
			mca_log(record);
			mtx_unlock_spin(&mca_lock);
			return;
		}
		STAILQ_REMOVE_HEAD(&mca_freelist, link);
		mca_freecount--;
	}

	rec->rec = *record;
	rec->logged = 0;
	STAILQ_INSERT_TAIL(&mca_records, rec, link);
	mca_count++;
	mtx_unlock_spin(&mca_lock);
	if (mode == CMCI && !cold)
		taskqueue_enqueue(mca_tq, &mca_refill_task);
}

#ifdef DEV_APIC
/*
 * Update the interrupt threshold for a CMCI.  The strategy is to use
 * a low trigger that interrupts as soon as the first event occurs.
 * However, if a steady stream of events arrives, the threshold is
 * increased until the interrupts are throttled to once every
 * cmc_throttle seconds or the periodic scan.  If a periodic scan
 * finds that the threshold is too high, it is lowered.
 */
static void
cmci_update(enum scan_mode mode, int bank, int valid, struct mca_record *rec)
{
	struct cmc_state *cc;
	uint64_t ctl;
	u_int delta;
	int count, limit;

	/* Fetch the current limit for this bank. */
	cc = &cmc_state[PCPU_GET(cpuid)][bank];
	ctl = rdmsr(MSR_MC_CTL2(bank));
	count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
	delta = (u_int)(time_uptime - cc->last_intr);

	/*
	 * If an interrupt was received less than cmc_throttle seconds
	 * since the previous interrupt and the count from the current
	 * event is greater than or equal to the current threshold,
	 * double the threshold up to the max.
	 */
	if (mode == CMCI && valid) {
		limit = ctl & MC_CTL2_THRESHOLD;
		if (delta < cmc_throttle && count >= limit &&
		    limit < cc->max_threshold) {
			limit = min(limit << 1, cc->max_threshold);
			ctl &= ~MC_CTL2_THRESHOLD;
			ctl |= limit;
			wrmsr(MSR_MC_CTL2(bank), ctl);
		}
		cc->last_intr = time_uptime;
		return;
	}

	/*
	 * When the banks are polled, check to see if the threshold
	 * should be lowered.
	 */
	if (mode != POLLED)
		return;

	/* If a CMCI occurred recently, do nothing for now. */
	if (delta < cmc_throttle)
		return;

	/*
	 * Compute a new limit based on the average rate of events per
	 * cmc_throttle seconds since the last interrupt.
	 */
	if (valid) {
		count = (rec->mr_status & MC_STATUS_COR_COUNT) >> 38;
		limit = count * cmc_throttle / delta;
		if (limit <= 0)
			limit = 1;
		else if (limit > cc->max_threshold)
			limit = cc->max_threshold;
	} else
		limit = 1;
	if ((ctl & MC_CTL2_THRESHOLD) != limit) {
		ctl &= ~MC_CTL2_THRESHOLD;
		ctl |= limit;
		wrmsr(MSR_MC_CTL2(bank), ctl);
	}
}
#endif

/*
 * This scans all the machine check banks of the current CPU to see if
 * there are any machine checks.  Any non-recoverable errors are
 * reported immediately via mca_log().  The current thread must be
 * pinned when this is called.  The 'mode' parameter indicates if we
 * are being called from the MC exception handler, the CMCI handler,
 * or the periodic poller.  In the MC exception case this function
 * returns true if the system is restartable.  Otherwise, it returns a
 * count of the number of valid MC records found.
 */
static int
mca_scan(enum scan_mode mode)
{
	struct mca_record rec;
	uint64_t mcg_cap, ucmask;
	int count, i, recoverable, valid;

	count = 0;
	recoverable = 1;
	ucmask = MC_STATUS_UC | MC_STATUS_PCC;

	/* When handling an MCE#, treat the OVER flag as non-restartable. */
	if (mode == MCE)
		ucmask |= MC_STATUS_OVER;
	mcg_cap = rdmsr(MSR_MCG_CAP);
	for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
#ifdef DEV_APIC
		/*
		 * For a CMCI, only check banks this CPU is
		 * responsible for.
		 */
		if (mode == CMCI && !(PCPU_GET(cmci_mask) & 1 << i))
			continue;
#endif

		valid = mca_check_status(i, &rec);
		if (valid) {
			count++;
			if (rec.mr_status & ucmask) {
				recoverable = 0;
				mtx_lock_spin(&mca_lock);
				mca_log(&rec);
				mtx_unlock_spin(&mca_lock);
			}
			mca_record_entry(mode, &rec);
		}

#ifdef DEV_APIC
		/*
		 * If this is a bank this CPU monitors via CMCI,
		 * update the threshold.
		 */
		if (PCPU_GET(cmci_mask) & 1 << i)
			cmci_update(mode, i, valid, &rec);
#endif
	}
	if (mode == POLLED)
		mca_fill_freelist();
	return (mode == MCE ? recoverable : count);
}

/*
 * Scan the machine check banks on all CPUs by binding to each CPU in
 * turn.  If any of the CPUs contained new machine check records, log
 * them to the console.
 */
static void
mca_scan_cpus(void *context, int pending)
{
	struct mca_internal *mca;
	struct thread *td;
	int count, cpu;

	mca_fill_freelist();
	td = curthread;
	count = 0;
	thread_lock(td);
	CPU_FOREACH(cpu) {
		sched_bind(td, cpu);
		thread_unlock(td);
		count += mca_scan(POLLED);
		thread_lock(td);
		sched_unbind(td);
	}
	thread_unlock(td);
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}

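/* Callout handler: queue a full scan and reschedule itself. */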
static void
mca_periodic_scan(void *arg)
{

	taskqueue_enqueue(mca_tq, &mca_scan_task);
	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}

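/* Writing a non-zero value to hw.mca.force_scan triggers a scan. */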
static int
sysctl_mca_scan(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		taskqueue_enqueue(mca_tq, &mca_scan_task);
	return (0);
}

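/*
 * Create the taskqueue used for the periodic scan and for freelist
 * refills; this is deferred via SYSINIT until the kernel is far
 * enough into boot to start taskqueue threads.
 */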
static void
mca_createtq(void *dummy)
{
	if (mca_banks <= 0)
		return;

	mca_tq = taskqueue_create_fast("mca", M_WAITOK,
	    taskqueue_thread_enqueue, &mca_tq);
	taskqueue_start_threads(&mca_tq, 1, PI_SWI(SWI_TQ), "mca taskq");

	/* CMCIs during boot may have claimed items from the freelist. */
	mca_fill_freelist();
}
SYSINIT(mca_createtq, SI_SUB_CONFIGURE, SI_ORDER_ANY, mca_createtq, NULL);

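/* Arm the periodic-scan callout once the scheduler is running. */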
static void
mca_startup(void *dummy)
{

	if (mca_banks <= 0)
		return;

	callout_reset(&mca_timer, mca_ticks * hz, mca_periodic_scan, NULL);
}
#ifdef EARLY_AP_STARTUP
SYSINIT(mca_startup, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, mca_startup, NULL);
#else
SYSINIT(mca_startup, SI_SUB_SMP, SI_ORDER_ANY, mca_startup, NULL);
#endif

#ifdef DEV_APIC
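/*
 * Allocate the per-CPU, per-bank CMCI state and register the
 * hw.mca.cmc_throttle sysctl.
 */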
static void
cmci_setup(void)
{
	int i;

	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
	    M_WAITOK);
	for (i = 0; i <= mp_maxid; i++)
		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
		    M_MCA, M_WAITOK | M_ZERO);
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "cmc_throttle", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
	    &cmc_throttle, 0, sysctl_positive_int, "I",
	    "Interval in seconds to throttle corrected MC interrupts");
}
#endif

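/*
 * One-time initialization of the global MCA state (locks, lists, the
 * periodic callout and the hw.mca sysctl tree), performed on the BSP
 * during boot.
 */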
static void
mca_setup(uint64_t mcg_cap)
{

	/*
	 * On AMD Family 10h processors, unless logging of level one TLB
	 * parity (L1TP) errors is disabled, enable the recommended workaround
	 * for Erratum 383.
	 */
	if (cpu_vendor_id == CPU_VENDOR_AMD &&
	    CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
		workaround_erratum383 = 1;

	mca_banks = mcg_cap & MCG_CAP_COUNT;
	mtx_init(&mca_lock, "mca", NULL, MTX_SPIN);
	STAILQ_INIT(&mca_records);
	TASK_INIT(&mca_scan_task, 0, mca_scan_cpus, NULL);
	callout_init(&mca_timer, 1);
	STAILQ_INIT(&mca_freelist);
	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
	mca_fill_freelist();
	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
	    "Record count");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
	    0, sysctl_positive_int, "I",
	    "Periodic interval in seconds to scan for machine checks");
	SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "records", CTLFLAG_RD, sysctl_mca_records, "Machine check records");
	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
	    "force_scan", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
	    sysctl_mca_scan, "I", "Force an immediate scan for machine checks");
#ifdef DEV_APIC
	if (mcg_cap & MCG_CAP_CMCI_P)
		cmci_setup();
#endif
}

#ifdef DEV_APIC
/*
 * See if we should monitor CMCI for this bank.  If CMCI_EN is already
 * set in MC_CTL2, then another CPU is responsible for this bank, so
 * ignore it.  If CMCI_EN returns zero after being set, then this bank
 * does not support CMCI.  If this CPU sets CMCI_EN, then it should
 * now monitor this bank.
 */
static void
cmci_monitor(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	ctl = rdmsr(MSR_MC_CTL2(i));
	if (ctl & MC_CTL2_CMCI_EN)
		/* Already monitored by another CPU. */
		return;

	/* Set the threshold to one event for now. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	if (!(ctl & MC_CTL2_CMCI_EN))
		/* This bank does not support CMCI. */
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];

	/* Determine maximum threshold. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 0x7fff;
	wrmsr(MSR_MC_CTL2(i), ctl);
	ctl = rdmsr(MSR_MC_CTL2(i));
	cc->max_threshold = ctl & MC_CTL2_THRESHOLD;

	/* Start off with a threshold of 1. */
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= 1;
	wrmsr(MSR_MC_CTL2(i), ctl);

	/* Mark this bank as monitored. */
	PCPU_SET(cmci_mask, PCPU_GET(cmci_mask) | 1 << i);
}

/*
 * For resume, reset the threshold for any banks we monitor back to
 * one and throw away the timestamp of the last interrupt.
 */
static void
cmci_resume(int i)
{
	struct cmc_state *cc;
	uint64_t ctl;

	KASSERT(i < mca_banks, ("CPU %d has more MC banks", PCPU_GET(cpuid)));

	/* Ignore banks not monitored by this CPU. */
	if (!(PCPU_GET(cmci_mask) & 1 << i))
		return;

	cc = &cmc_state[PCPU_GET(cpuid)][i];
	cc->last_intr = 0;
	ctl = rdmsr(MSR_MC_CTL2(i));
	ctl &= ~MC_CTL2_THRESHOLD;
	ctl |= MC_CTL2_CMCI_EN | 1;
	wrmsr(MSR_MC_CTL2(i), ctl);
}
#endif

/*
 * Initializes per-CPU machine check registers and enables corrected
 * machine check interrupts.
 */
static void
_mca_init(int boot)
{
	uint64_t mcg_cap;
	uint64_t ctl, mask;
	int i, skip;

	/* MCE is required. */
	if (!mca_enabled || !(cpu_feature & CPUID_MCE))
		return;

	if (cpu_feature & CPUID_MCA) {
		if (boot)
			PCPU_SET(cmci_mask, 0);

		mcg_cap = rdmsr(MSR_MCG_CAP);
		if (mcg_cap & MCG_CAP_CTL_P)
			/* Enable MCA features. */
			wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
		if (PCPU_GET(cpuid) == 0 && boot)
			mca_setup(mcg_cap);

		/*
		 * Disable logging of level one TLB parity (L1TP) errors by
		 * the data cache as an alternative workaround for AMD Family
		 * 10h Erratum 383.  Unlike the recommended workaround, there
		 * is no performance penalty to this workaround.  However,
		 * L1TP errors will go unreported.
		 */
		if (cpu_vendor_id == CPU_VENDOR_AMD &&
		    CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
			mask = rdmsr(MSR_MC0_CTL_MASK);
			if ((mask & (1UL << 5)) == 0)
				wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
		}
		for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
			/* By default enable logging of all errors. */
			ctl = 0xffffffffffffffffUL;
			skip = 0;

			if (cpu_vendor_id == CPU_VENDOR_INTEL) {
				/*
				 * For P6 models before Nehalem MC0_CTL is
				 * always enabled and reserved.
				 */
				if (i == 0 && CPUID_TO_FAMILY(cpu_id) == 0x6
				    && CPUID_TO_MODEL(cpu_id) < 0x1a)
					skip = 1;
			} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
				/* BKDG for Family 10h: unset GartTblWkEn. */
				if (i == 4 && CPUID_TO_FAMILY(cpu_id) >= 0xf)
					ctl &= ~(1UL << 10);
			}

			if (!skip)
				wrmsr(MSR_MC_CTL(i), ctl);

#ifdef DEV_APIC
			if (mcg_cap & MCG_CAP_CMCI_P) {
				if (boot)
					cmci_monitor(i);
				else
					cmci_resume(i);
			}
#endif

			/* Clear all errors. */
			wrmsr(MSR_MC_STATUS(i), 0);
		}

#ifdef DEV_APIC
		if (PCPU_GET(cmci_mask) != 0 && boot)
			lapic_enable_cmc();
#endif
	}

	load_cr4(rcr4() | CR4_MCE);
}

/* Must be executed on each CPU during boot. */
void
mca_init(void)
{

	_mca_init(1);
}

/* Must be executed on each CPU during resume. */
void
mca_resume(void)
{

	_mca_init(0);
}

/*
 * The machine check registers for the BSP cannot be initialized until
 * the local APIC is initialized.  This happens at SI_SUB_CPU,
 * SI_ORDER_SECOND.
 */
static void
mca_init_bsp(void *arg __unused)
{

	mca_init();
}
SYSINIT(mca_init_bsp, SI_SUB_CPU, SI_ORDER_ANY, mca_init_bsp, NULL);

/* Called when a machine check exception fires. */
void
mca_intr(void)
{
	uint64_t mcg_status;
	int old_count, recoverable;

	if (!(cpu_feature & CPUID_MCA)) {
		/*
		 * Just print the values of the old Pentium registers
		 * and panic.
		 */
		printf("MC Type: 0x%jx  Address: 0x%jx\n",
		    (uintmax_t)rdmsr(MSR_P5_MC_TYPE),
		    (uintmax_t)rdmsr(MSR_P5_MC_ADDR));
		panic("Machine check");
	}

	/* Scan the banks and check for any non-recoverable errors. */
	old_count = mca_count;
	recoverable = mca_scan(MCE);
	mcg_status = rdmsr(MSR_MCG_STATUS);
	if (!(mcg_status & MCG_STATUS_RIPV))
		recoverable = 0;

	if (!recoverable) {
		/*
		 * Wait for at least one error to be logged before
		 * panic'ing.  Some errors will assert a machine check
		 * on all CPUs, but only certain CPUs will find a valid
		 * bank to log.
		 */
		while (mca_count == old_count)
			cpu_spinwait();

		panic("Unrecoverable machine check exception");
	}

	/* Clear MCIP. */
	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
}

#ifdef DEV_APIC
/* Called for a CMCI (correctable machine check interrupt). */
void
cmc_intr(void)
{
	struct mca_internal *mca;
	int count;

	/*
	 * Serialize MCA bank scanning to prevent collisions from
	 * sibling threads.
	 */
	count = mca_scan(CMCI);

	/* If we found anything, log it to the console. */
	if (count != 0) {
		mtx_lock_spin(&mca_lock);
		STAILQ_FOREACH(mca, &mca_records, link) {
			if (!mca->logged) {
				mca->logged = 1;
				mca_log(&mca->rec);
			}
		}
		mtx_unlock_spin(&mca_lock);
	}
}
#endif