/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <mach/mach_time.h>
#include <i386/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/machine_check.h>
#include <i386/proc_reg.h>

/*
 * At the time of a machine-check exception, all hardware threads panic.
 * Each thread saves the state of its MCA registers to its per-cpu data area.
 *
 * State reporting is serialized so that one thread dumps all valid state for
 * all threads to the panic log. This may entail spinning while waiting for
 * other threads to finish saving their state to memory. A timeout applies to
 * this wait -- in particular, a 3-strikes timeout may prevent a thread from
 * taking part in the affair.
 */

#define IF(bool,str)	((bool) ? (str) : "")

static boolean_t	mca_initialized = FALSE;
static boolean_t	mca_MCE_present = FALSE;
static boolean_t	mca_MCA_present = FALSE;
static uint32_t		mca_family = 0;
static unsigned int	mca_error_bank_count = 0;
static boolean_t	mca_control_MSR_present = FALSE;
static boolean_t	mca_threshold_status_present = FALSE;
static boolean_t	mca_sw_error_recovery_present = FALSE;
static boolean_t	mca_extended_MSRs_present = FALSE;
static unsigned int	mca_extended_MSRs_count = 0;
static boolean_t	mca_cmci_present = FALSE;
static ia32_mcg_cap_t	ia32_mcg_cap;
decl_simple_lock_data(static, mca_lock);

typedef struct {
	ia32_mci_ctl_t		mca_mci_ctl;
	ia32_mci_status_t	mca_mci_status;
	ia32_mci_misc_t		mca_mci_misc;
	ia32_mci_addr_t		mca_mci_addr;
} mca_mci_bank_t;

typedef struct mca_state {
	boolean_t		mca_is_saved;
	boolean_t		mca_is_valid;	/* some state is valid */
	ia32_mcg_ctl_t		mca_mcg_ctl;
	ia32_mcg_status_t	mca_mcg_status;
	mca_mci_bank_t		mca_error_bank[0];
} mca_state_t;

typedef enum {
	CLEAR,
	DUMPING,
	DUMPED
} mca_dump_state_t;
static volatile mca_dump_state_t mca_dump_state = CLEAR;

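/*
 * Discover the machine-check facilities present on this processor:
 * CPUID feature flags indicate whether MCE/MCA are supported at all,
 * and IA32_MCG_CAP reports the error-bank count plus the optional
 * capabilities (MCG_CTL, threshold-based status, software error
 * recovery, CMCI, and -- consulted only on family 0xF here -- the
 * extended state-save MSRs).
 */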
static void
mca_get_availability(void)
{
	uint64_t	features = cpuid_info()->cpuid_features;
	uint32_t	family =   cpuid_info()->cpuid_family;
	uint32_t	model =    cpuid_info()->cpuid_model;
	uint32_t	stepping = cpuid_info()->cpuid_stepping;

	mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
	mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
	mca_family = family;

	if ((model == CPUID_MODEL_HASWELL     && stepping < 3) ||
	    (model == CPUID_MODEL_HASWELL_ULT && stepping < 1) ||
	    (model == CPUID_MODEL_CRYSTALWELL && stepping < 1))
		panic("Haswell pre-C0 steppings are not supported");

	/*
	 * If MCA is present, the number of error banks etc. is reported
	 * by the IA32_MCG_CAP MSR.
	 */
	if (mca_MCA_present) {
		ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
		mca_error_bank_count = ia32_mcg_cap.bits.count;
		mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
		mca_threshold_status_present = ia32_mcg_cap.bits.mcg_tes_p;
		mca_sw_error_recovery_present = ia32_mcg_cap.bits.mcg_ser_p;
		mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
		if (family == 0x0F) {
			mca_extended_MSRs_present = ia32_mcg_cap.bits.mcg_ext_p;
			mca_extended_MSRs_count = ia32_mcg_cap.bits.mcg_ext_cnt;
		}
	}
}

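/*
 * Per-cpu machine-check initialization.  The first (boot) processor to
 * run this discovers the machine-check architecture and initializes the
 * reporting lock; every caller then enables the error-reporting banks
 * appropriate to its family (MC0 is left untouched on family 6), clears
 * the status registers, and finally sets CR4.MCE to enable the
 * machine-check exception.
 */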
void
mca_cpu_init(void)
{
	unsigned int	i;

	/*
	 * The first (boot) processor is responsible for discovering the
	 * machine check architecture present on this machine.
	 */
	if (!mca_initialized) {
		mca_get_availability();
		mca_initialized = TRUE;
		simple_lock_init(&mca_lock, 0);
	}

	if (mca_MCA_present) {

		/* Enable all MCA features */
		if (mca_control_MSR_present)
			wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);

		switch (mca_family) {
		case 0x06:
			/* Enable all but mc0 */
			for (i = 1; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		case 0x0F:
			/* Enable all banks */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		}
	}

	/* Enable machine check exception handling if available */
	if (mca_MCE_present) {
		set_cr4(get_cr4()|CR4_MCE);
	}
}

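/*
 * Report whether Corrected Machine Check Interrupt (CMCI) signaling is
 * available, initializing the MCA state first if necessary.  Intended
 * for callers (e.g. local APIC setup) deciding whether to program a
 * CMCI vector; the call sites are assumed and not in this file.
 */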
boolean_t
mca_is_cmci_present(void)
{
	if (!mca_initialized)
		mca_cpu_init();
	return mca_cmci_present;
}

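/*
 * Allocate (and zero) the per-cpu save area used at machine-check time,
 * sized for the number of error-reporting banks discovered earlier.  If
 * the boot processor's area has not been allocated yet, allocate it here
 * as well.
 */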
void
mca_cpu_alloc(cpu_data_t	*cdp)
{
	vm_size_t	mca_state_size;

	/*
	 * Allocate space for an array of error banks.
	 */
	mca_state_size = sizeof(mca_state_t) +
				sizeof(mca_mci_bank_t) * mca_error_bank_count;
	cdp->cpu_mca_state = kalloc(mca_state_size);
	if (cdp->cpu_mca_state == NULL) {
		printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number);
		return;
	}
	bzero((void *) cdp->cpu_mca_state, mca_state_size);

	/*
	 * If the boot processor has yet to have its allocation made,
	 * do so now.
	 */
	if (cpu_datap(master_cpu)->cpu_mca_state == NULL)
		mca_cpu_alloc(cpu_datap(master_cpu));
}

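/*
 * Save this thread's machine-check state into the supplied per-cpu area:
 * IA32_MCG_CTL (if present), IA32_MCG_STATUS, and the CTL/STATUS/MISC/ADDR
 * registers of every error-reporting bank.  MISC and ADDR are read only
 * when the bank's status marks them valid.  The first thread in a package
 * to save state also publishes its save area as the package's, which is
 * used later for the MC8 (memory controller) report.
 */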
static void
mca_save_state(mca_state_t *mca_state)
{
	mca_mci_bank_t	*bank;
	unsigned int	i;

	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);

	if (mca_state == NULL)
		return;

	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
					rdmsr64(IA32_MCG_CTL) : 0ULL;
	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);

	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
	for (i = 0; i < mca_error_bank_count; i++, bank++) {
		bank->mca_mci_ctl        = rdmsr64(IA32_MCi_CTL(i));
		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));
		if (!bank->mca_mci_status.bits.val)
			continue;
		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv) ?
					rdmsr64(IA32_MCi_MISC(i)) : 0ULL;
		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv) ?
					rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;
		mca_state->mca_is_valid = TRUE;
	}

	/*
	 * If we're the first thread with MCA state, point our package to it
	 * and don't care about races.
	 */
	if (x86_package()->mca_state == NULL)
		x86_package()->mca_state = mca_state;

	mca_state->mca_is_saved = TRUE;
}

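/*
 * Save this thread's machine-check state if an MCA dump has already been
 * started elsewhere, so the dumping thread can include it in the panic
 * log.  Expected to be called from the panic path; the call sites are
 * not in this file.
 */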
void
mca_check_save(void)
{
	if (mca_dump_state > CLEAR)
		mca_save_state(current_cpu_datap()->cpu_mca_state);
}

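/*
 * Dump the extended machine-check state MSRs (register images saved by
 * the processor at the time of the machine check).  These exist only
 * when IA32_MCG_CAP.MCG_EXT_P is set, which this code consults only on
 * family 0xF parts.
 */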
static void mca_dump_64bit_state(void)
{
	kdb_printf("Extended Machine Check State:\n");
	kdb_printf("  IA32_MCG_RAX:    0x%016qx\n", rdmsr64(IA32_MCG_RAX));
	kdb_printf("  IA32_MCG_RBX:    0x%016qx\n", rdmsr64(IA32_MCG_RBX));
	kdb_printf("  IA32_MCG_RCX:    0x%016qx\n", rdmsr64(IA32_MCG_RCX));
	kdb_printf("  IA32_MCG_RDX:    0x%016qx\n", rdmsr64(IA32_MCG_RDX));
	kdb_printf("  IA32_MCG_RSI:    0x%016qx\n", rdmsr64(IA32_MCG_RSI));
	kdb_printf("  IA32_MCG_RDI:    0x%016qx\n", rdmsr64(IA32_MCG_RDI));
	kdb_printf("  IA32_MCG_RBP:    0x%016qx\n", rdmsr64(IA32_MCG_RBP));
	kdb_printf("  IA32_MCG_RSP:    0x%016qx\n", rdmsr64(IA32_MCG_RSP));
	kdb_printf("  IA32_MCG_RFLAGS: 0x%016qx\n", rdmsr64(IA32_MCG_RFLAGS));
	kdb_printf("  IA32_MCG_RIP:    0x%016qx\n", rdmsr64(IA32_MCG_RIP));
	kdb_printf("  IA32_MCG_MISC:   0x%016qx\n", rdmsr64(IA32_MCG_MISC));
	kdb_printf("  IA32_MCG_R8:     0x%016qx\n", rdmsr64(IA32_MCG_R8));
	kdb_printf("  IA32_MCG_R9:     0x%016qx\n", rdmsr64(IA32_MCG_R9));
	kdb_printf("  IA32_MCG_R10:    0x%016qx\n", rdmsr64(IA32_MCG_R10));
	kdb_printf("  IA32_MCG_R11:    0x%016qx\n", rdmsr64(IA32_MCG_R11));
	kdb_printf("  IA32_MCG_R12:    0x%016qx\n", rdmsr64(IA32_MCG_R12));
	kdb_printf("  IA32_MCG_R13:    0x%016qx\n", rdmsr64(IA32_MCG_R13));
	kdb_printf("  IA32_MCG_R14:    0x%016qx\n", rdmsr64(IA32_MCG_R14));
	kdb_printf("  IA32_MCG_R15:    0x%016qx\n", rdmsr64(IA32_MCG_R15));
}

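/* Helper: read an MSR and return only its low 32 bits. */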
static uint32_t rdmsr32(uint32_t msr)
{
	return (uint32_t) rdmsr64(msr);
}

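/* 32-bit counterpart of mca_dump_64bit_state(). */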
static void mca_dump_32bit_state(void)
{
	kdb_printf("Extended Machine Check State:\n");
	kdb_printf("  IA32_MCG_EAX:    0x%08x\n", rdmsr32(IA32_MCG_EAX));
	kdb_printf("  IA32_MCG_EBX:    0x%08x\n", rdmsr32(IA32_MCG_EBX));
	kdb_printf("  IA32_MCG_ECX:    0x%08x\n", rdmsr32(IA32_MCG_ECX));
	kdb_printf("  IA32_MCG_EDX:    0x%08x\n", rdmsr32(IA32_MCG_EDX));
	kdb_printf("  IA32_MCG_ESI:    0x%08x\n", rdmsr32(IA32_MCG_ESI));
	kdb_printf("  IA32_MCG_EDI:    0x%08x\n", rdmsr32(IA32_MCG_EDI));
	kdb_printf("  IA32_MCG_EBP:    0x%08x\n", rdmsr32(IA32_MCG_EBP));
	kdb_printf("  IA32_MCG_ESP:    0x%08x\n", rdmsr32(IA32_MCG_ESP));
	kdb_printf("  IA32_MCG_EFLAGS: 0x%08x\n", rdmsr32(IA32_MCG_EFLAGS));
	kdb_printf("  IA32_MCG_EIP:    0x%08x\n", rdmsr32(IA32_MCG_EIP));
	kdb_printf("  IA32_MCG_MISC:   0x%08x\n", rdmsr32(IA32_MCG_MISC));
}

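/* Print the family/model/stepping, microcode revision and brand string. */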
static void
mca_report_cpu_info(void)
{
	i386_cpu_info_t *infop = cpuid_info();

	kdb_printf(" family: %d model: %d stepping: %d microcode: %d\n",
		infop->cpuid_family,
		infop->cpuid_model,
		infop->cpuid_stepping,
		infop->cpuid_microcode_version);
	kdb_printf(" %s\n", infop->cpuid_brand_string);
}

static const char *mc8_memory_operation[] = {
	[MC8_MMM_GENERIC] =		"generic",
	[MC8_MMM_READ] =		"read",
	[MC8_MMM_WRITE] =		"write",
	[MC8_MMM_ADDRESS_COMMAND] =	"address/command",
	[MC8_MMM_RESERVED] =		"reserved"
};

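/*
 * Decode and print error bank 8 using the memory-controller-specific
 * layout (channel, memory operation, correctable-error count, and the
 * RTID/DIMM/channel/syndrome fields of the MISC register).  This layout
 * is assumed to apply to the processors this code targets; other banks
 * go through the generic mca_dump_bank() below.
 */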
static void
mca_dump_bank_mc8(mca_state_t *state, int i)
{
	mca_mci_bank_t			*bank;
	ia32_mci_status_t		status;
	struct ia32_mc8_specific	mc8;
	int				mmm;

	bank = &state->mca_error_bank[i];
	status = bank->mca_mci_status;
	mc8 = status.bits_mc8;
	mmm = MIN(mc8.memory_operation, MC8_MMM_RESERVED);

	kdb_printf(
		" IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
		i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
	if (!status.bits.val)
		return;

	kdb_printf(
		"  Channel number:         %d%s\n"
		"  Memory Operation:       %s\n"
		"  Machine-specific error: %s%s%s%s%s%s%s%s%s\n"
		"  COR_ERR_CNT:            %d\n",
		mc8.channel_number,
		IF(mc8.channel_number == 15, " (unknown)"),
		mc8_memory_operation[mmm],
		IF(mc8.read_ecc,            "Read ECC "),
		IF(mc8.ecc_on_a_scrub,      "ECC on scrub "),
		IF(mc8.write_parity,        "Write parity "),
		IF(mc8.redundant_memory,    "Redundant memory "),
		IF(mc8.sparing,             "Sparing/Resilvering "),
		IF(mc8.access_out_of_range, "Access out of Range "),
		IF(mc8.rtid_out_of_range,   "RTID out of Range "),
		IF(mc8.address_parity,      "Address Parity "),
		IF(mc8.byte_enable_parity,  "Byte Enable Parity "),
		mc8.cor_err_cnt);
	kdb_printf(
		"  Status bits:\n%s%s%s%s%s%s",
		IF(status.bits.pcc,   "   Processor context corrupt\n"),
		IF(status.bits.addrv, "   ADDR register valid\n"),
		IF(status.bits.miscv, "   MISC register valid\n"),
		IF(status.bits.en,    "   Error enabled\n"),
		IF(status.bits.uc,    "   Uncorrected error\n"),
		IF(status.bits.over,  "   Error overflow\n"));
	if (status.bits.addrv)
		kdb_printf(
			" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
			i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
	if (status.bits.miscv) {
		ia32_mc8_misc_t	mc8_misc;

		mc8_misc.u64 = bank->mca_mci_misc;
		kdb_printf(
			" IA32_MC%d_MISC(0x%x): 0x%016qx\n"
			"  RTID:     %d\n"
			"  DIMM:     %d\n"
			"  Channel:  %d\n"
			"  Syndrome: 0x%x\n",
			i, IA32_MCi_MISC(i), mc8_misc.u64,
			mc8_misc.bits.rtid,
			mc8_misc.bits.dimm,
			mc8_misc.bits.channel,
			(int) mc8_misc.bits.syndrome);
	}
}

static const char *mca_threshold_status[] = {
	[THRESHOLD_STATUS_NO_TRACKING] =	"No tracking",
	[THRESHOLD_STATUS_GREEN] =		"Green",
	[THRESHOLD_STATUS_YELLOW] =		"Yellow",
	[THRESHOLD_STATUS_RESERVED] =		"Reserved"
};

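/*
 * Decode and print a generic error bank: the MCA and model-specific
 * error codes, the threshold-based status and UCR signaling bits where
 * the capability flags say they are defined, the summary status bits,
 * and the ADDR/MISC registers when valid.
 */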
static void
mca_dump_bank(mca_state_t *state, int i)
{
	mca_mci_bank_t		*bank;
	ia32_mci_status_t	status;

	bank = &state->mca_error_bank[i];
	status = bank->mca_mci_status;
	kdb_printf(
		" IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
		i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
	if (!status.bits.val)
		return;

	kdb_printf(
		"  MCA error code:            0x%04x\n",
		status.bits.mca_error);
	kdb_printf(
		"  Model specific error code: 0x%04x\n",
		status.bits.model_specific_error);
	if (!mca_threshold_status_present) {
		kdb_printf(
			"  Other information:         0x%08x\n",
			status.bits.other_information);
	} else {
		int	threshold = status.bits_tes_p.threshold;
		kdb_printf(
			"  Other information:         0x%08x\n"
			"  Threshold-based status:    %s\n",
			status.bits_tes_p.other_information,
			(status.bits_tes_p.uc == 0) ?
			    mca_threshold_status[threshold] :
			    "Undefined");
	}
	if (mca_threshold_status_present &&
	    mca_sw_error_recovery_present) {
		kdb_printf(
			"  Software Error Recovery:\n%s%s",
			IF(status.bits_tes_p.ar, "   Recovery action reqd\n"),
			IF(status.bits_tes_p.s,  "   Signaling UCR error\n"));
	}
	kdb_printf(
		"  Status bits:\n%s%s%s%s%s%s",
		IF(status.bits.pcc,   "   Processor context corrupt\n"),
		IF(status.bits.addrv, "   ADDR register valid\n"),
		IF(status.bits.miscv, "   MISC register valid\n"),
		IF(status.bits.en,    "   Error enabled\n"),
		IF(status.bits.uc,    "   Uncorrected error\n"),
		IF(status.bits.over,  "   Error overflow\n"));
	if (status.bits.addrv)
		kdb_printf(
			" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
			i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
	if (status.bits.miscv)
		kdb_printf(
			" IA32_MC%d_MISC(0x%x): 0x%016qx\n",
			i, IA32_MCi_MISC(i), bank->mca_mci_misc);
}

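/*
 * Dump every error-reporting bank saved for this processor.  Bank 8 of
 * the processor whose save area was published as the package's is
 * decoded with the memory-controller layout above rather than the
 * generic one.
 */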
static void
mca_cpu_dump_error_banks(mca_state_t *state)
{
	unsigned int	i;

	if (!state->mca_is_valid)
		return;

	kdb_printf("MCA error-reporting registers:\n");
	for (i = 0; i < mca_error_bank_count; i++) {
		if (i == 8 && state == x86_package()->mca_state) {
			/*
			 * Fatal Memory Error
			 */

			/* Dump MC8 for this package */
			kdb_printf(" Package %d logged:\n",
				   x86_package()->ppkg_num);
			mca_dump_bank_mc8(state, 8);
			continue;
		}
		mca_dump_bank(state, i);
	}
}

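/*
 * Entry point for reporting at machine-check (panic) time: save this
 * thread's state, elect the first caller as the dumping thread, give the
 * other threads a bounded amount of time to save theirs, then print the
 * capabilities and the per-processor state to the panic log.
 *
 * A minimal sketch of the expected calling sequence, assuming a trap
 * handler along these lines (the handler itself is not in this file and
 * the name is hypothetical):
 *
 *	void
 *	machine_check_handler(void)	// hypothetical caller
 *	{
 *		mca_check_save();	// latecomers contribute their state
 *		mca_dump();		// first caller prints the report;
 *					// others spin until it is DUMPED
 *		panic("machine-check");
 *	}
 */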
void
mca_dump(void)
{
	mca_state_t	*mca_state = current_cpu_datap()->cpu_mca_state;
	uint64_t	deadline;
	unsigned int	i = 0;

	/*
	 * Capture local MCA registers to per-cpu data.
	 */
	mca_save_state(mca_state);

	/*
	 * Serialize: the first caller controls dumping MCA registers,
	 * other threads spin meantime.
	 */
	simple_lock(&mca_lock);
	if (mca_dump_state > CLEAR) {
		simple_unlock(&mca_lock);
		while (mca_dump_state == DUMPING)
			cpu_pause();
		return;
	}
	mca_dump_state = DUMPING;
	simple_unlock(&mca_lock);

	/*
	 * Wait for all other hardware threads to save their state,
	 * or for the timeout to expire.
	 */
	deadline = mach_absolute_time() + LockTimeOut;
	while (mach_absolute_time() < deadline && i < real_ncpus) {
		if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
			cpu_pause();
			continue;
		}
		i += 1;
	}

	/*
	 * Report machine-check capabilities:
	 */
	kdb_printf(
		"Machine-check capabilities 0x%016qx:\n", ia32_mcg_cap.u64);

	mca_report_cpu_info();

	kdb_printf(
		" %d error-reporting banks\n%s%s%s", mca_error_bank_count,
		IF(mca_control_MSR_present,
		   " control MSR present\n"),
		IF(mca_threshold_status_present,
		   " threshold-based error status present\n"),
		IF(mca_cmci_present,
		   " extended corrected memory error handling present\n"));
	if (mca_extended_MSRs_present)
		kdb_printf(
			" %d extended MSRs present\n", mca_extended_MSRs_count);

	/*
	 * Dump all processor state:
	 */
	for (i = 0; i < real_ncpus; i++) {
		mca_state_t		*mcsp = cpu_datap(i)->cpu_mca_state;
		ia32_mcg_status_t	status;

		kdb_printf("Processor %d: ", i);
		if (mcsp == NULL ||
		    mcsp->mca_is_saved == FALSE ||
		    mcsp->mca_mcg_status.u64 == 0) {
			kdb_printf("no machine-check status reported\n");
			continue;
		}
		if (!mcsp->mca_is_valid) {
			kdb_printf("no valid machine-check state\n");
			continue;
		}
		status = mcsp->mca_mcg_status;
		kdb_printf(
			"machine-check status 0x%016qx:\n%s%s%s", status.u64,
			IF(status.bits.ripv, " restart IP valid\n"),
			IF(status.bits.eipv, " error IP valid\n"),
			IF(status.bits.mcip, " machine-check in progress\n"));

		mca_cpu_dump_error_banks(mcsp);
	}

	/*
	 * Dump any extended machine state:
	 */
	if (mca_extended_MSRs_present) {
		if (cpu_mode_is64bit())
			mca_dump_64bit_state();
		else
			mca_dump_32bit_state();
	}

	/* Update state to release any other threads. */
	mca_dump_state = DUMPED;
}


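/*
 * Debug-only hook intended to exercise the machine-check path:
 * mtrr_lapic_cached() is expected to provoke a machine-check exception
 * (the exact mechanism lives elsewhere); on non-DEBUG builds this just
 * prints a message.
 */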
extern void mca_exception_panic(void);
extern void mtrr_lapic_cached(void);
void mca_exception_panic(void)
{
#if DEBUG
	mtrr_lapic_cached();
#else
	kprintf("mca_exception_panic() requires DEBUG build\n");
#endif
}