/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Fault Management Architecture (FMA) Resource and Protocol Support
 *
 * The routines contained herein provide services to support kernel subsystems
 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
 *
 * Name-Value Pair Lists
 *
 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t).  FMA-specific nvlist constructor and
 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
 * to create an nvpair list using custom allocators.  Callers may choose to
 * allocate either from the kernel memory allocator, or from a preallocated
 * buffer, which is useful in constrained contexts such as high-level
 * interrupt routines.
 *
 * Protocol Event and FMRI Construction
 *
 * Convenience routines are provided to construct nvlist events according to
 * the FMA Event Protocol and Naming Schema specification for ereports and
 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
 *
 * ENA Manipulation
 *
 * Routines to generate ENA formats 0, 1 and 2 are available as well as
 * routines to increment formats 1 and 2.  Individual fields within the
 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
 * fm_ena_format_get() and fm_ena_generation_get().
 */
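
/*
 * Illustrative sketch (not compiled; the class string, payload member and
 * device path below are hypothetical): a typical producer generates an ENA,
 * builds a detector FMRI and an ereport with the constructors in this file,
 * and then posts the result.  EVCH_TRYHARD is the illumos sysevent flag used
 * elsewhere in this file; the FreeBSD path ignores the flag.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *	nvlist_t *detector = fm_nvlist_create(NULL);
 *	nvlist_t *ereport = fm_nvlist_create(NULL);
 *
 *	fm_fmri_dev_set(detector, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci1000,3060@3", NULL, NULL);
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "io.example.fail",
 *	    ena, detector, "example-count", DATA_TYPE_UINT32, 1, NULL);
 *	fm_ereport_post(ereport, EVCH_TRYHARD);
 *
 *	fm_nvlist_destroy(ereport, FM_NVA_FREE);
 *	fm_nvlist_destroy(detector, FM_NVA_FREE);
 */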

#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysevent.h>
#include <sys/nvpair.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/compress.h>
#include <sys/kobj.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/pcpu.h>
#include <sys/sunddi.h>
#include <sys/systeminfo.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>

/*
 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below.  These
 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
 */
static const char *fm_url = "http://www.sun.com/msg";
static const char *fm_msgid = "SUNOS-8000-0G";
static char *volatile fm_panicstr = NULL;

#ifdef illumos
errorq_t *ereport_errorq;
#endif
void *ereport_dumpbuf;
size_t ereport_dumplen;

static uint_t ereport_chanlen = ERPT_EVCH_MAX;
static evchan_t *ereport_chan = NULL;
static ulong_t ereport_qlen = 0;
static size_t ereport_size = 0;
static int ereport_cols = 80;

extern void fastreboot_disable_highpil(void);

/*
 * Common fault management kstats to record ereport generation
 * failures
 */

struct erpt_kstat {
	kstat_named_t	erpt_dropped;		/* num erpts dropped on post */
	kstat_named_t	erpt_set_failed;	/* num erpt set failures */
	kstat_named_t	fmri_set_failed;	/* num fmri set failures */
	kstat_named_t	payload_set_failed;	/* num payload set failures */
};

static struct erpt_kstat erpt_kstat_data = {
	{ "erpt-dropped", KSTAT_DATA_UINT64 },
	{ "erpt-set-failed", KSTAT_DATA_UINT64 },
	{ "fmri-set-failed", KSTAT_DATA_UINT64 },
	{ "payload-set-failed", KSTAT_DATA_UINT64 }
};

#ifdef illumos
/*ARGSUSED*/
static void
fm_drain(void *private, void *data, errorq_elem_t *eep)
{
	nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);

	if (!panicstr)
		(void) fm_ereport_post(nvl, EVCH_TRYHARD);
	else
		fm_nvprint(nvl);
}
#endif

void
fm_init(void)
{
	kstat_t *ksp;

#ifdef illumos
	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);
#endif

	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

#ifdef illumos
	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");
#endif

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");
	}
}

#ifdef illumos
/*
 * Formatting utility function for fm_nvprintr.  We attempt to wrap chunks of
 * output so they aren't split across console lines, and return the end column.
 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	return ((c + width) % cols);
}

/*
 * Recursively print a nvlist in the specified column width and return the
 * column we end up in.  This function is called recursively by fm_nvprint(),
 * below.  We generically format the entire nvpair using hexadecimal
 * integers and strings, and elide any integer arrays.  Arrays are basically
 * used for cache dumps right now, so we suppress them so as not to overwhelm
 * the amount of console output we produce at panic time.  This can be further
 * enhanced as FMA technology grows based upon the needs of consumers.  All
 * FMA telemetry is logged using the dump device transport, so the console
 * output serves only as a fallback in case this procedure is unsuccessful.
 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
			}
			break;

		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_INT8_ARRAY:
		case DATA_TYPE_UINT8_ARRAY:
		case DATA_TYPE_INT16_ARRAY:
		case DATA_TYPE_UINT16_ARRAY:
		case DATA_TYPE_INT32_ARRAY:
		case DATA_TYPE_UINT32_ARRAY:
		case DATA_TYPE_INT64_ARRAY:
		case DATA_TYPE_UINT64_ARRAY:
		case DATA_TYPE_STRING_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;
		case DATA_TYPE_UNKNOWN:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}

void
fm_nvprint(nvlist_t *nvl)
{
	char *class;
	int c = 0;

	console_printf("\r");

	if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
		c = fm_printf(0, c, ereport_cols, "%s", class);

	if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
		console_printf("\n");

	console_printf("\n");
}

/*
 * Wrapper for panic() that first produces an FMA-style message for admins.
 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
 * is the one exception to that rule and the only error that gets messaged.
 * This function is intended for use by subsystems that have detected a fatal
 * error and enqueued appropriate ereports and wish to then force a panic.
 */
/*PRINTFLIKE1*/
void
fm_panic(const char *format, ...)
{
	va_list ap;

	(void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__i386) || defined(__amd64)
	fastreboot_disable_highpil();
#endif /* __i386 || __amd64 */
	va_start(ap, format);
	vpanic(format, ap);
	va_end(ap);
}

/*
 * Simply tell the caller whether fm_panicstr is set, i.e. whether an FMA
 * event has caused the panic.  If so, something other than the default
 * panic diagnosis method will diagnose the cause of the panic.
 */
int
is_fm_panic()
{
	if (fm_panicstr)
		return (1);
	else
		return (0);
}

/*
 * Print any appropriate FMA banner message before the panic message.  This
 * function is called by panicsys() and prints the message for fm_panic().
 * We print the message here so that it comes after the system is quiesced.
 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
 * The rest of the message is for the console only and not needed in the log,
 * so it is printed using console_printf().  We break it up into multiple
 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity.  See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}

/*
 * Utility function to write all of the pending ereports to the dump device.
 * This function is called at either normal reboot or panic time, and simply
 * iterates over the in-transit messages in the ereport sysevent channel.
 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}
#endif

/*
 * Post an error report (ereport) to the sysevent error channel.  The error
 * channel must be established with a prior call to sysevent_evc_create()
 * before publication may occur.
 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;
	sysevent_id_t eid;

	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

#ifdef illumos
	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
#else
	(void) ddi_log_sysevent(NULL, SUNW_VENDOR, EC_DEV_STATUS,
	    ESC_DEV_DLE, ereport, &eid, DDI_SLEEP);
#endif
}

/*
 * Wrappers for FM nvlist allocators
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}

/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}

const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,
	i_fm_free,
	NULL
};

/*
 * Create and initialize a new nv_alloc_t for a fixed buffer, buf.  A pointer
 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
 * is returned to indicate that the nv_alloc structure could not be created.
 */
nv_alloc_t *
fm_nva_xcreate(char *buf, size_t bufsz)
{
	nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

	if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
		kmem_free(nvhdl, sizeof (nv_alloc_t));
		return (NULL);
	}

	return (nvhdl);
}

/*
 * Destroy a previously allocated nv_alloc structure.  The fixed buffer
 * associated with nva must be freed by the caller.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}
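
/*
 * Illustrative sketch (not compiled; the buffer name and early return are
 * assumptions): pairing fm_nva_xcreate() with a preallocated buffer lets
 * nvlist construction avoid kmem allocation, e.g. in constrained contexts.
 * The buffer itself remains owned by the caller after fm_nva_xdestroy().
 *
 *	static char erpt_buf[ERPT_DATA_SZ];
 *	nv_alloc_t *nva;
 *	nvlist_t *nvl;
 *
 *	if ((nva = fm_nva_xcreate(erpt_buf, sizeof (erpt_buf))) == NULL)
 *		return;
 *	if ((nvl = fm_nvlist_create(nva)) != NULL)
 *		fm_nvlist_destroy(nvl, FM_NVA_RETAIN);
 *	fm_nva_xdestroy(nva);
 */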

/*
 * Create a new nv list.  A pointer to a new nv list structure is returned
 * upon success or NULL is returned to indicate that the structure could
 * not be created.  The newly created nv list is managed by the operations
 * installed in nva.  If nva is NULL, the default FMA nva operations are
 * installed and used.
 *
 * When called from the kernel and nva == NULL, this function must be called
 * from passive kernel context with no locks held that can prevent a
 * sleeping memory allocation from occurring.  Otherwise, this function may
 * be called from other kernel contexts as long as a valid nva created via
 * fm_nva_xcreate() is supplied.
 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}

/*
 * Destroy a previously allocated nvlist structure.  flag indicates whether
 * or not the associated nva structure should be freed (FM_NVA_FREE) or
 * retained (FM_NVA_RETAIN).  Retaining the nv alloc structure allows
 * it to be re-used for future nvlist creation operations.
 */
void
fm_nvlist_destroy(nvlist_t *nvl, int flag)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);

	nvlist_free(nvl);

	if (nva != NULL) {
		if (flag == FM_NVA_FREE)
			fm_nva_xdestroy(nva);
	}
}
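
/*
 * Illustrative sketch (not compiled): the flag handed to fm_nvlist_destroy()
 * decides the fate of the underlying nv_alloc_t.  With the default
 * (nva == NULL) allocator, FM_NVA_FREE releases the internally allocated
 * handle; with a caller-supplied nva, FM_NVA_RETAIN keeps it for reuse.
 *
 *	nvlist_t *nvl = fm_nvlist_create(NULL);
 *
 *	if (nvl != NULL)
 *		fm_nvlist_destroy(nvl, FM_NVA_FREE);
 */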

int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		name = va_arg(ap, char *);
	}
	return (ret);
}

void
fm_payload_set(nvlist_t *payload, ...)
{
	int ret;
	const char *name;
	va_list ap;

	va_start(ap, payload);
	name = va_arg(ap, char *);
	ret = i_fm_payload_set(payload, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
}
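
/*
 * Illustrative sketch (not compiled; member names are hypothetical):
 * fm_payload_set() consumes (name, DATA_TYPE_*, value) tuples terminated by
 * a NULL name, and array types pass the element count ahead of the pointer.
 *
 *	uint64_t addrs[2] = { 0x1000, 0x2000 };
 *
 *	fm_payload_set(payload,
 *	    "example-ena", DATA_TYPE_UINT64, ena,
 *	    "example-msg", DATA_TYPE_STRING, "uncorrectable",
 *	    "example-addrs", DATA_TYPE_UINT64_ARRAY, 2, addrs,
 *	    NULL);
 */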

/*
 * Set-up and validate the members of an ereport event according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	class			string		ereport
 *	version			uint8_t		0
 *	ena			uint64_t	<ena>
 *	detector		nvlist_t	<detector>
 *	ereport-payload		nvlist_t	<var args>
 *
 * We don't actually add a 'version' member to the payload.  Really,
 * the version quoted to us by our caller is that of the category 1
 * "ereport" event class (and we require FM_EREPORT_VERS0) but
 * the payload version of the actual leaf class event under construction
 * may be something else.  Callers should supply a version in the varargs,
 * or (better) we could take two version arguments - one for the
 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
 * for the leaf class.
 */
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
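
/*
 * Illustrative sketch (not compiled; the leaf class and payload member are
 * hypothetical): the class passed in is prefixed with FM_EREPORT_CLASS, so
 * the example below is published as "ereport.cpu.example.l2cache".  The
 * trailing varargs follow the same convention as fm_payload_set().
 *
 *	fm_ereport_set(ereport, FM_EREPORT_VERS0, "cpu.example.l2cache",
 *	    fm_ena_generate(0, FM_ENA_FMT1), detector,
 *	    "example-afar", DATA_TYPE_UINT64, afar,
 *	    NULL);
 */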

/*
 * Set-up and validate the members of an hc fmri according to:
 *
 *	Member name		Type		Value
 *	===================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	hc-name			string		<name>
 *	hc-id			string		<id>
 *
 * Note that auth and hc-id are optional members.
 */

#define	HC_MAXPAIRS	20
#define	HC_MAXNAMELEN	50

static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}

void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
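
/*
 * Illustrative sketch (not compiled; the component names are hypothetical):
 * an hc FMRI naming a DIMM.  Each (name, id) pair becomes one element of
 * the hc-list, in order from the outermost component inward.
 *
 *	fm_fmri_hc_set(fmri, FM_HC_SCHEME_VERSION, auth, NULL, 3,
 *	    "motherboard", 0,
 *	    "chip", 1,
 *	    "dimm", 2);
 */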

/*
 * Set-up and validate the members of a dev fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	devpath			string		<devpath>
 *	[devid]			string		<devid>
 *	[target-port-l0id]	string		<target-port-lun0-id>
 *
 * Note that auth, devid and target-port-l0id are optional members.
 */
void
fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
    const char *devpath, const char *devid, const char *tpl0)
{
	int err = 0;

	if (version != DEV_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
	err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);

	if (auth != NULL) {
		err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth);
	}

	err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);

	if (devid != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);

	if (tpl0 != NULL)
		err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);

	if (err)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
}
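
/*
 * Illustrative sketch (not compiled; the path and devid strings are
 * hypothetical): a dev FMRI carrying an optional devid and no
 * target-port-l0id.
 *
 *	fm_fmri_dev_set(fmri, DEV_SCHEME_VERSION0, NULL,
 *	    "/pci@0,0/pci1000,3060@3/sd@1,0",
 *	    "id1,sd@n500000e01d4a2b30", NULL);
 */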

/*
 * Set-up and validate the members of a cpu fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>
 *	cpuid			uint32_t	<cpu_id>
 *	cpumask			uint8_t		<cpu_mask>
 *	serial			string		<serial_id>
 *
 * Note that auth, cpumask and serial are optional members.
 */
void
fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
    uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
{
	uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;

	if (version < CPU_SCHEME_VERSION1) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
	    FM_FMRI_SCHEME_CPU) != 0) {
		atomic_inc_64(failedp);
		return;
	}

	if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0)
		atomic_inc_64(failedp);

	if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
		atomic_inc_64(failedp);

	if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
	    *cpu_maskp) != 0)
		atomic_inc_64(failedp);

	if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
	    FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
		atomic_inc_64(failedp);
}
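
/*
 * Illustrative sketch (not compiled): a minimal cpu FMRI with no authority,
 * mask or serial number.  Note that, as written above, a NULL serial_idp is
 * also counted against the fmri-set-failed kstat.
 *
 *	fm_fmri_cpu_set(fmri, CPU_SCHEME_VERSION1, NULL, cpu_id, NULL, NULL);
 */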

/*
 * Set-up and validate the members of a mem fmri according to:
 *
 *	Member name		Type		Value
 *	====================================================
 *	version			uint8_t		0
 *	auth			nvlist_t	<auth>		[optional]
 *	unum			string		<unum>
 *	serial			string		<serial>	[optional*]
 *	offset			uint64_t	<offset>	[optional]
 *
 *	* serial is required if offset is present
 */
void
fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    const char *unum, const char *serial, uint64_t offset)
{
	if (version != MEM_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (!serial && (offset != (uint64_t)-1)) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (auth != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
		    (nvlist_t *)auth) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}

	if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (serial != NULL) {
		if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
		    (char **)&serial, 1) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
		if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
		    FM_FMRI_MEM_OFFSET, offset) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
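
/*
 * Illustrative sketch (not compiled; the unum and serial strings are
 * hypothetical): a mem FMRI with a serial number and an offset.  Pass
 * (uint64_t)-1 as the offset when it is not known; an offset without a
 * serial is rejected above.
 *
 *	fm_fmri_mem_set(fmri, MEM_SCHEME_VERSION0, NULL,
 *	    "MB/CMP0/BR0/CH0/D0", "d2a0f1c3", 0x1000);
 */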

void
fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
    uint64_t vdev_guid)
{
	if (version != ZFS_SCHEME_VERSION0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
	}

	if (vdev_guid != 0) {
		if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}

uint64_t
fm_ena_increment(uint64_t ena)
{
	uint64_t new_ena;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
		break;
	case FM_ENA_FMT2:
		new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
		break;
	default:
		new_ena = 0;
	}

	return (new_ena);
}

uint64_t
fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
{
	uint64_t ena = 0;

	switch (format) {
	case FM_ENA_FMT1:
		if (timestamp) {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((timestamp << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		} else {
			ena = (uint64_t)((format & ENA_FORMAT_MASK) |
			    ((cpuid << ENA_FMT1_CPUID_SHFT) &
			    ENA_FMT1_CPUID_MASK) |
			    ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
			    ENA_FMT1_TIME_MASK));
		}
		break;
	case FM_ENA_FMT2:
		ena = (uint64_t)((format & ENA_FORMAT_MASK) |
		    ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
		break;
	default:
		break;
	}

	return (ena);
}

uint64_t
fm_ena_generate(uint64_t timestamp, uchar_t format)
{
	return (fm_ena_generate_cpu(timestamp, PCPU_GET(cpuid), format));
}

uint64_t
fm_ena_generation_get(uint64_t ena)
{
	uint64_t gen;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
		break;
	case FM_ENA_FMT2:
		gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
		break;
	default:
		gen = 0;
		break;
	}

	return (gen);
}

uchar_t
fm_ena_format_get(uint64_t ena)
{
	return (ENA_FORMAT(ena));
}

uint64_t
fm_ena_id_get(uint64_t ena)
{
	uint64_t id;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
		break;
	case FM_ENA_FMT2:
		id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
		break;
	default:
		id = 0;
	}

	return (id);
}

uint64_t
fm_ena_time_get(uint64_t ena)
{
	uint64_t time;

	switch (ENA_FORMAT(ena)) {
	case FM_ENA_FMT1:
		time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
		break;
	case FM_ENA_FMT2:
		time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
		break;
	default:
		time = 0;
	}

	return (time);
}
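
/*
 * Illustrative sketch (not compiled): generating a format 1 ENA on the
 * current CPU and pulling its fields back apart with the accessors above.
 *
 *	uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *
 *	if (fm_ena_format_get(ena) == FM_ENA_FMT1) {
 *		uint64_t id = fm_ena_id_get(ena);      (embedded identifier)
 *		uint64_t t = fm_ena_time_get(ena);     (embedded timestamp)
 *		ena = fm_ena_increment(ena);           (bump the generation)
 *	}
 */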

#ifdef illumos
/*
 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
 */
void
fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
{
	int i;
	char *sym;
	ulong_t off;
	char *stkpp[FM_STK_DEPTH];
	char buf[FM_STK_DEPTH * FM_SYM_SZ];
	char *stkp = buf;

	for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
		if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
			(void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
		else
			(void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
		stkpp[i] = stkp;
	}

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
	    DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
}
#endif

#ifdef illumos
void
print_msg_hwerr(ctid_t ct_id, proc_t *p)
{
	uprintf("Killed process %d (%s) in contract id %d "
	    "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
}
#endif

void
fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	nvlist_t **hcl;
	uint_t n;
	int i, j;
	va_list ap;
	char *hcname, *hcid;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/*
	 * copy the bboard nvpairs to the pairs array
	 */
	if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
	    != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < n; i++) {
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
		    &hcname) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
		if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}

		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}

	/*
	 * create the pairs from passed in pairs
	 */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = n; i < npairs + n; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];
		(void) snprintf(idstr, sizeof (idstr), "%u", id);
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			for (j = 0; j <= i; j++) {
				if (pairs[j] != NULL)
					fm_nvlist_destroy(pairs[j],
					    FM_NVA_RETAIN);
			}
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
	va_end(ap);

	/*
	 * Create the fmri hc list
	 */
	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
	    npairs + n) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return;
	}

	for (i = 0; i < npairs + n; i++) {
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
	}

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
			return;
		}
	}
}