error.c revision 817:d7deb02e90b3
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/machsystm.h>
#include <sys/cpuvar.h>
#include <sys/async.h>
#include <sys/ontrap.h>
#include <sys/ddifm.h>
#include <sys/hypervisor_api.h>
#include <sys/errorq.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/x_call.h>
#include <sys/error.h>
#include <sys/fm/util.h>
#include <sys/ivintr.h>

#define	MAX_CE_FLTS		10
#define	MAX_ASYNC_FLTS		6

errorq_t *ue_queue;			/* queue of uncorrectable errors */
errorq_t *ce_queue;			/* queue of correctable errors */

/*
 * Used by the memory test driver.
 * ce_verbose_memory - covers CEs in DIMMs
 * ce_verbose_other - covers "others" (ecache, IO, etc.)
 *
 * If the value is 0, nothing is logged.
 * If the value is 1, the error is logged to the log file, but not the console.
 * If the value is 2, the error is logged to both the log file and the console.
 */
int	ce_verbose_memory = 1;
int	ce_verbose_other = 1;

int	ce_show_data = 0;
int	ce_debug = 0;
int	ue_debug = 0;
int	reset_debug = 0;

/*
 * Tunables for controlling the handling of asynchronous faults (AFTs).
 * Setting these to non-default values on a non-DEBUG kernel is NOT supported.
 */
int	aft_verbose = 0;	/* log AFT messages > 1 to log only */
int	aft_panic = 0;		/* panic (not reboot) on fatal usermode AFLT */
int	aft_testfatal = 0;	/* force all AFTs to panic immediately */

/*
 * Used for vbsc hostshutdown (power-off button)
 */
int	err_shutdown_triggered = 0;	/* only once */
uint_t	err_shutdown_inum = 0;		/* used to pull the trigger */

/*
 * Defined in bus_func.c but initialised in error_init().
 */
extern kmutex_t bfd_lock;

static uint32_t rq_overflow_count = 0;		/* counter for rq overflow */

static void cpu_queue_one_event(errh_async_flt_t *);
static uint32_t count_entries_on_queue(uint64_t, uint64_t, uint32_t);
static void errh_page_settoxic(errh_async_flt_t *, uchar_t);
static void errh_page_retire(errh_async_flt_t *);
static int errh_error_protected(struct regs *, struct async_flt *, int *);
static void errh_rq_full(struct async_flt *);
static void ue_drain(void *, struct async_flt *, errorq_elem_t *);
static void ce_drain(void *, struct async_flt *, errorq_elem_t *);

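/*
 * Drain the resumable error queue: copy each error report from the
 * kernel buffer shadowing the queue into a local errh_async_flt_t,
 * classify it, and dispatch it to the generic errorq framework.
 */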
/*ARGSUSED*/
void
process_resumable_error(struct regs *rp, uint32_t head_offset,
    uint32_t tail_offset)
{
	struct machcpu *mcpup;
	struct async_flt *aflt;
	errh_async_flt_t errh_flt;
	errh_er_t *head_va;

	mcpup = &(CPU->cpu_m);

	while (head_offset != tail_offset) {
		/* kernel buffer starts right after the resumable queue */
		head_va = (errh_er_t *)(mcpup->cpu_rq_va + head_offset +
		    CPU_RQ_SIZE);
		/* Copy the error report to the local buffer */
		bzero(&errh_flt, sizeof (errh_async_flt_t));
		bcopy((char *)head_va, &(errh_flt.errh_er),
		    sizeof (errh_er_t));

		/* Increment the queue head */
		head_offset += Q_ENTRY_SIZE;
		/* Wrap around */
		head_offset &= (CPU_RQ_SIZE - 1);

		/* Set the error handle to zero so it can hold a new report */
		head_va->ehdl = 0;

		switch (errh_flt.errh_er.desc) {
		case ERRH_DESC_UCOR_RE:
			break;

		case ERRH_DESC_WARN_RE:
			/*
			 * Power-off requested, but handle it one time only.
			 */
			if (!err_shutdown_triggered) {
				setsoftint(err_shutdown_inum);
				++err_shutdown_triggered;
			}
			continue;

		default:
			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
			    "invalid in resumable error handler",
			    (long long)errh_flt.errh_er.desc);
			continue;
		}

		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);
		aflt->flt_id = gethrtime();
		aflt->flt_bus_id = getprocessorid();
		aflt->flt_class = CPU_FAULT;
		aflt->flt_prot = AFLT_PROT_NONE;
		aflt->flt_priv = (((errh_flt.errh_er.attr & ERRH_MODE_MASK)
		    >> ERRH_MODE_SHIFT) == ERRH_MODE_PRIV);

		if (errh_flt.errh_er.attr & ERRH_ATTR_CPU)
			/* The error occurred on another cpu */
			aflt->flt_panic = 1;
		else
			aflt->flt_panic = 0;

		/*
		 * Handle the resumable-queue-full case.
		 */
		if (errh_flt.errh_er.attr & ERRH_ATTR_RQF) {
			errh_rq_full(aflt);
		}

		/*
		 * Queue the error on the ce or ue queue depending on
		 * flt_panic.  Even if flt_panic is set, we keep processing
		 * the remaining elements on the rq until the panic starts.
		 */
		cpu_queue_one_event(&errh_flt);

		/*
		 * Panic here if aflt->flt_panic has been set.
		 * Enqueued errors will be logged as part of the panic flow.
		 */
		if (aflt->flt_panic) {
			fm_panic("Unrecoverable error on another CPU");
		}
	}
}

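/*
 * Drain the nonresumable error queue.  In addition to dispatching each
 * report, this may unwind to an on_trap()/t_lofault trampoline, query
 * bus nexus drivers about PIO errors, mark faulty memory pages toxic,
 * or panic the system.
 */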
void
process_nonresumable_error(struct regs *rp, uint64_t tl,
    uint32_t head_offset, uint32_t tail_offset)
{
	struct machcpu *mcpup;
	struct async_flt *aflt;
	errh_async_flt_t errh_flt;
	errh_er_t *head_va;
	int trampolined = 0;
	int expected = DDI_FM_ERR_UNEXPECTED;
	uint64_t exec_mode;

	mcpup = &(CPU->cpu_m);

	while (head_offset != tail_offset) {
		/* kernel buffer starts right after the nonresumable queue */
		head_va = (errh_er_t *)(mcpup->cpu_nrq_va + head_offset +
		    CPU_NRQ_SIZE);

		/* Copy the error report to the local buffer */
		bzero(&errh_flt, sizeof (errh_async_flt_t));

		bcopy((char *)head_va, &(errh_flt.errh_er),
		    sizeof (errh_er_t));

		/* Increment the queue head */
		head_offset += Q_ENTRY_SIZE;
		/* Wrap around */
		head_offset &= (CPU_NRQ_SIZE - 1);

		/* Set the error handle to zero so it can hold a new report */
		head_va->ehdl = 0;

		aflt = (struct async_flt *)&(errh_flt.cmn_asyncflt);

		trampolined = 0;

		if (errh_flt.errh_er.attr & ERRH_ATTR_PIO)
			aflt->flt_class = BUS_FAULT;
		else
			aflt->flt_class = CPU_FAULT;

		aflt->flt_id = gethrtime();
		aflt->flt_bus_id = getprocessorid();
		aflt->flt_pc = (caddr_t)rp->r_pc;
		exec_mode = (errh_flt.errh_er.attr & ERRH_MODE_MASK)
		    >> ERRH_MODE_SHIFT;
		aflt->flt_priv = (exec_mode == ERRH_MODE_PRIV ||
		    exec_mode == ERRH_MODE_UNKNOWN);
		aflt->flt_tl = (uchar_t)tl;
		aflt->flt_prot = AFLT_PROT_NONE;
		aflt->flt_panic = ((aflt->flt_tl != 0) ||
		    (aft_testfatal != 0));

		switch (errh_flt.errh_er.desc) {
		case ERRH_DESC_PR_NRE:
			/*
			 * Fall through: a precise fault also needs to be
			 * checked to see if it was protected.
			 */

		case ERRH_DESC_DEF_NRE:
			/*
			 * If the trap occurred in privileged mode at TL=0,
			 * we need to check to see if we were executing
			 * in kernel under on_trap() or t_lofault
			 * protection.  If so, modify the saved registers
			 * so that we return from the trap to the
			 * appropriate trampoline routine.
			 */
			if (aflt->flt_priv == 1 && aflt->flt_tl == 0)
				trampolined =
				    errh_error_protected(rp, aflt, &expected);

			if (!aflt->flt_priv || aflt->flt_prot ==
			    AFLT_PROT_COPY) {
				aflt->flt_panic |= aft_panic;
			} else if (!trampolined &&
			    aflt->flt_class != BUS_FAULT) {
				aflt->flt_panic = 1;
			}

			/*
			 * If this is a PIO error, we need to query the bus
			 * nexus for fatal errors.
			 */
			if (aflt->flt_class == BUS_FAULT) {
				aflt->flt_addr = errh_flt.errh_er.ra;
				errh_cpu_run_bus_error_handlers(aflt,
				    expected);
			}

			break;

		default:
			cmn_err(CE_WARN, "Error Descriptor 0x%llx "
			    "invalid in nonresumable error handler",
			    (long long)errh_flt.errh_er.desc);
			continue;
		}

		/*
		 * Queue the error report for further processing.  Even if
		 * flt_panic is set, we keep processing other errors in the
		 * queue until the panic routine stops the kernel.
		 */
		cpu_queue_one_event(&errh_flt);

		/*
		 * Panic here if aflt->flt_panic has been set.
		 * Enqueued errors will be logged as part of the panic flow.
		 */
		if (aflt->flt_panic) {
			fm_panic("Unrecoverable hardware error");
		}

		/*
		 * If it is a memory error, we turn on the PAGE_IS_TOXIC
		 * flag.  The page will be retired later and scrubbed when
		 * it is freed.
		 */
		if (errh_flt.errh_er.attr & ERRH_ATTR_MEM)
			errh_page_settoxic(&errh_flt, PAGE_IS_TOXIC);

		/*
		 * If we queued an error and it was in user mode or
		 * protected by t_lofault, set the AST flag so the queue
		 * will be drained before returning to user mode.
		 */
		if (!aflt->flt_priv || aflt->flt_prot == AFLT_PROT_COPY) {
			int pcb_flag = 0;

			if (aflt->flt_class == CPU_FAULT)
				pcb_flag |= ASYNC_HWERR;
			else if (aflt->flt_class == BUS_FAULT)
				pcb_flag |= ASYNC_BERR;

			ttolwp(curthread)->lwp_pcb.pcb_flags |= pcb_flag;
			aston(curthread);
		}
	}
}

/*
 * For PIO errors, this routine calls the nexus driver's error
 * callback routines.  If a callback routine returns fatal, and
 * we are in kernel or unknown mode without any error protection,
 * we need to turn on the panic flag.
 */
void
errh_cpu_run_bus_error_handlers(struct async_flt *aflt, int expected)
{
	int status;
	ddi_fm_error_t de;

	bzero(&de, sizeof (ddi_fm_error_t));

	de.fme_version = DDI_FME_VERSION;
	de.fme_ena = fm_ena_generate(aflt->flt_id, FM_ENA_FMT1);
	de.fme_flag = expected;
	de.fme_bus_specific = (void *)aflt->flt_addr;
	status = ndi_fm_handler_dispatch(ddi_root_node(), NULL, &de);

	/*
	 * If the error is protected, we will jump to the proper routine
	 * to handle it; if it occurred at user level, we just kill the
	 * user process; if the driver thinks the error is not fatal, we
	 * can drive on.  If none of the above applies, we panic.
	 */
	if ((aflt->flt_prot == AFLT_PROT_NONE) && (aflt->flt_priv == 1) &&
	    (status == DDI_FM_FATAL))
		aflt->flt_panic = 1;
}

/*
 * This routine checks to see if we are under any error protection when
 * the error happens.  If we are under error protection, we unwind to
 * the protection and indicate the fault.
 */
static int
errh_error_protected(struct regs *rp, struct async_flt *aflt, int *expected)
{
	int trampolined = 0;
	ddi_acc_hdl_t *hp;

	if (curthread->t_ontrap != NULL) {
		on_trap_data_t *otp = curthread->t_ontrap;

		if (otp->ot_prot & OT_DATA_EC) {
			aflt->flt_prot = AFLT_PROT_EC;
			otp->ot_trap |= OT_DATA_EC;
			rp->r_pc = otp->ot_trampoline;
			rp->r_npc = rp->r_pc + 4;
			trampolined = 1;
		}

		if (otp->ot_prot & OT_DATA_ACCESS) {
			aflt->flt_prot = AFLT_PROT_ACCESS;
			otp->ot_trap |= OT_DATA_ACCESS;
			rp->r_pc = otp->ot_trampoline;
			rp->r_npc = rp->r_pc + 4;
			trampolined = 1;
			/*
			 * For peeks and cautious gets,
			 * errors are expected.
			 */
			hp = (ddi_acc_hdl_t *)otp->ot_handle;
			if (!hp)
				*expected = DDI_FM_ERR_PEEK;
			else if (hp->ah_acc.devacc_attr_access ==
			    DDI_CAUTIOUS_ACC)
				*expected = DDI_FM_ERR_EXPECTED;
		}
	} else if (curthread->t_lofault) {
		aflt->flt_prot = AFLT_PROT_COPY;
		rp->r_g1 = EFAULT;
		rp->r_pc = curthread->t_lofault;
		rp->r_npc = rp->r_pc + 4;
		trampolined = 1;
	}

	return (trampolined);
}

/*
 * Queue one event: fatal errors go on the uncorrectable (ue) queue,
 * everything else on the correctable (ce) queue.
 */
static void
cpu_queue_one_event(errh_async_flt_t *errh_fltp)
{
	struct async_flt *aflt = (struct async_flt *)errh_fltp;
	errorq_t *eqp;

	if (aflt->flt_panic)
		eqp = ue_queue;
	else
		eqp = ce_queue;

	errorq_dispatch(eqp, errh_fltp, sizeof (errh_async_flt_t),
	    aflt->flt_panic);
}

/*
 * The cpu_async_log_err() function is called by the ce/ue_drain() function to
 * handle logging for CPU events that are dequeued.  As such, it can be invoked
 * from softint context, from AST processing in the trap() flow, or from the
 * panic flow.  We decode the CPU-specific data, and log appropriate messages.
 */
void
cpu_async_log_err(void *flt)
{
	errh_async_flt_t *errh_fltp = (errh_async_flt_t *)flt;
	errh_er_t *errh_erp = (errh_er_t *)&errh_fltp->errh_er;

	switch (errh_erp->desc) {
	case ERRH_DESC_UCOR_RE:
		if (errh_erp->attr & ERRH_ATTR_MEM) {
			/*
			 * Turn on the PAGE_IS_TOXIC flag.  The page will be
			 * scrubbed when it is freed.
			 */
			errh_page_settoxic(errh_fltp, PAGE_IS_TOXIC);
		}

		break;

	case ERRH_DESC_PR_NRE:
	case ERRH_DESC_DEF_NRE:
		if (errh_erp->attr & ERRH_ATTR_MEM) {
			/*
			 * For a non-resumable memory error, retire
			 * the page here.
			 */
			errh_page_retire(errh_fltp);

			/*
			 * If we are going to panic, scrub the page first.
			 */
			if (errh_fltp->cmn_asyncflt.flt_panic)
				mem_scrub(errh_fltp->errh_er.ra,
				    errh_fltp->errh_er.sz);
		}
		break;

	default:
		break;
	}
}

/*
 * Called from ce_drain().
 */
void
cpu_ce_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
	case BUS_FAULT:
		cpu_async_log_err(aflt);
		break;

	default:
		break;
	}
}

/*
 * Called from ue_drain().
 */
void
cpu_ue_log_err(struct async_flt *aflt)
{
	switch (aflt->flt_class) {
	case CPU_FAULT:
	case BUS_FAULT:
		cpu_async_log_err(aflt);
		break;

	default:
		break;
	}
}

/*
 * Set the given toxic flag on each page of the error memory region.
 */
static void
errh_page_settoxic(errh_async_flt_t *errh_fltp, uchar_t flag)
{
	page_t *pp;
	uint64_t flt_real_addr_start = errh_fltp->errh_er.ra;
	uint64_t flt_real_addr_end = flt_real_addr_start +
	    errh_fltp->errh_er.sz - 1;
	int64_t current_addr;

	if (errh_fltp->errh_er.sz == 0)
		return;

	for (current_addr = flt_real_addr_start;
	    current_addr < flt_real_addr_end; current_addr += MMU_PAGESIZE) {
		pp = page_numtopp_nolock((pfn_t)
		    (current_addr >> MMU_PAGESHIFT));

		if (pp != NULL) {
			page_settoxic(pp, flag);
		}
	}
}

/*
 * Retire the page(s) indicated in the error report.
 */
static void
errh_page_retire(errh_async_flt_t *errh_fltp)
{
	page_t *pp;
	uint64_t flt_real_addr_start = errh_fltp->errh_er.ra;
	uint64_t flt_real_addr_end = flt_real_addr_start +
	    errh_fltp->errh_er.sz - 1;
	int64_t current_addr;

	if (errh_fltp->errh_er.sz == 0)
		return;

	for (current_addr = flt_real_addr_start;
	    current_addr < flt_real_addr_end; current_addr += MMU_PAGESIZE) {
		pp = page_numtopp_nolock((pfn_t)
		    (current_addr >> MMU_PAGESHIFT));

		if (pp != NULL) {
			(void) page_retire(pp, PAGE_IS_TOXIC);
		}
	}
}

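/*
 * Scrub the physical memory range [paddr, paddr + len) via the
 * hv_mem_scrub hypervisor call, looping until the whole range has
 * been scrubbed or the hypervisor returns an error.
 */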
void
mem_scrub(uint64_t paddr, uint64_t len)
{
	uint64_t pa, length, scrubbed_len;

	pa = paddr;
	length = len;
	scrubbed_len = 0;

	while (length > 0) {
		if (hv_mem_scrub(pa, length, &scrubbed_len) != H_EOK)
			break;

		pa += scrubbed_len;
		length -= scrubbed_len;
	}
}

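/*
 * Sync (flush to physical memory) the range backing [va, va + len)
 * via the hv_mem_sync hypervisor call.  Returns silently if the
 * virtual address cannot be translated.
 */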
void
mem_sync(caddr_t va, size_t len)
{
	uint64_t pa, length, flushed;

	pa = va_to_pa((caddr_t)va);

	if (pa == (uint64_t)-1)
		return;

	length = len;
	flushed = 0;

	while (length > 0) {
		if (hv_mem_sync(pa, length, &flushed) != H_EOK)
			break;

		pa += flushed;
		length -= flushed;
	}
}

/*
 * If the resumable queue is full, we need to check whether any cpu is
 * in the error state.  If not, we drive on; if yes, we need to panic.
 * The hypervisor call hv_cpu_state() is used to check the cpu state.
 */
static void
errh_rq_full(struct async_flt *afltp)
{
	processorid_t who;
	uint64_t cpu_state;
	uint64_t retval;

	for (who = 0; who < NCPU; who++)
		if (CPU_IN_SET(cpu_ready_set, who)) {
			retval = hv_cpu_state(who, &cpu_state);
			if (retval != H_EOK || cpu_state == CPU_STATE_ERROR) {
				afltp->flt_panic = 1;
				break;
			}
		}
}

/*
 * Return the size of the processor-specific async error structure.
 */
int
cpu_aflt_size(void)
{
	return (sizeof (errh_async_flt_t));
}

#define	SZ_TO_ETRS_SHIFT	6

/*
 * Called when the resumable queue overflows; bump the overflow counter.
 */
/*ARGSUSED*/
void
rq_overflow(struct regs *rp, uint64_t head_offset,
    uint64_t tail_offset)
{
	rq_overflow_count++;
}

/*
 * Handler to process a fatal error.  This routine can be called from a
 * softint, from trap()'s AST handling, or from the panic flow.
 */
/*ARGSUSED*/
static void
ue_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
{
	cpu_ue_log_err(aflt);
}

/*
 * Handler to process a correctable error.  This routine can be called from
 * a softint.  We just call the CPU module's logging routine.
 */
/*ARGSUSED*/
static void
ce_drain(void *ignored, struct async_flt *aflt, errorq_elem_t *eqep)
{
	cpu_ce_log_err(aflt);
}

/*
 * Handler to process a vbsc hostshutdown (power-off button) request.
 */
static int
err_shutdown_softintr()
{
	cmn_err(CE_WARN, "Power-off requested, system will now shutdown.");
	do_shutdown();

	/*
	 * Just in case do_shutdown() fails.
	 */
	(void) timeout((void(*)(void *))power_down, NULL, 100 * hz);
	return (DDI_INTR_CLAIMED);
}

/*
 * Allocate error queue sizes based on max_ncpus.  max_ncpus is set just
 * after ncpunode has been determined.  ncpus is set in start_other_cpus
 * which is called after error_init() but may change dynamically.
 */
void
error_init(void)
{
	char tmp_name[MAXSYSNAME];
	pnode_t node;
	size_t size = cpu_aflt_size();

	/*
	 * Initialize the correctable and uncorrectable error queues.
	 */
	ue_queue = errorq_create("ue_queue", (errorq_func_t)ue_drain, NULL,
	    MAX_ASYNC_FLTS * (max_ncpus + 1), size, PIL_2, ERRORQ_VITAL);

	ce_queue = errorq_create("ce_queue", (errorq_func_t)ce_drain, NULL,
	    MAX_CE_FLTS * (max_ncpus + 1), size, PIL_1, 0);

	if (ue_queue == NULL || ce_queue == NULL)
		panic("failed to create required system error queue");

	/*
	 * Set up the interrupt handler for the power-off button.
	 */
	err_shutdown_inum = add_softintr(PIL_9,
	    (softintrfunc)err_shutdown_softintr, NULL);

	/*
	 * Initialize the busfunc list mutex.  This must be a PIL_15 spin lock
	 * because we will need to acquire it from cpu_async_error().
	 */
	mutex_init(&bfd_lock, NULL, MUTEX_SPIN, (void *)PIL_15);

	node = prom_rootnode();
	if ((node == OBP_NONODE) || (node == OBP_BADNODE)) {
		cmn_err(CE_CONT, "error_init: node 0x%x\n", (uint_t)node);
		return;
	}

	if (((size = prom_getproplen(node, "reset-reason")) != -1) &&
	    (size <= MAXSYSNAME) &&
	    (prom_getprop(node, "reset-reason", tmp_name) != -1)) {
		if (reset_debug) {
			cmn_err(CE_CONT, "System booting after %s\n", tmp_name);
		} else if (strncmp(tmp_name, "FATAL", 5) == 0) {
			cmn_err(CE_CONT,
			    "System booting after fatal error %s\n", tmp_name);
		}
	}
}

/*
 * Nonresumable queue is full, panic here
 */
/*ARGSUSED*/
void
nrq_overflow(struct regs *rp)
{
	fm_panic("Nonresumable queue full");
}