/* zfs_fm.c revision 213198 */
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#include <sys/spa.h>
27#include <sys/spa_impl.h>
28#include <sys/vdev.h>
29#include <sys/vdev_impl.h>
30#include <sys/zio.h>
31
32#include <sys/fm/fs/zfs.h>
33#include <sys/fm/protocol.h>
34#include <sys/fm/util.h>
35
36#ifdef _KERNEL
37/* Including sys/bus.h is just too hard, so I declare what I need here. */
38extern void devctl_notify(const char *__system, const char *__subsystem,
39    const char *__type, const char *__data);
40#endif
41
42/*
43 * This general routine is responsible for generating all the different ZFS
44 * ereports.  The payload is dependent on the class, and which arguments are
45 * supplied to the function:
46 *
47 * 	EREPORT			POOL	VDEV	IO
48 * 	block			X	X	X
49 * 	data			X		X
50 * 	device			X	X
51 * 	pool			X
52 *
53 * If we are in a loading state, all errors are chained together by the same
54 * SPA-wide ENA (Error Numeric Association).
55 *
56 * For isolated I/O requests, we get the ENA from the zio_t. The propagation
57 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
58 * to chain together all ereports associated with a logical piece of data.  For
59 * read I/Os, there  are basically three 'types' of I/O, which form a roughly
60 * layered diagram:
61 *
62 *      +---------------+
63 * 	| Aggregate I/O |	No associated logical data or device
64 * 	+---------------+
65 *              |
66 *              V
67 * 	+---------------+	Reads associated with a piece of logical data.
68 * 	|   Read I/O    |	This includes reads on behalf of RAID-Z,
69 * 	+---------------+       mirrors, gang blocks, retries, etc.
70 *              |
71 *              V
72 * 	+---------------+	Reads associated with a particular device, but
73 * 	| Physical I/O  |	no logical data.  Issued as part of vdev caching
74 * 	+---------------+	and I/O aggregation.
75 *
76 * Note that 'physical I/O' here is not the same terminology as used in the rest
77 * of ZIO.  Typically, 'physical I/O' simply means that there is no attached
78 * blockpointer.  But I/O with no associated block pointer can still be related
79 * to a logical piece of data (i.e. RAID-Z requests).
80 *
81 * Purely physical I/O always have unique ENAs.  They are not related to a
82 * particular piece of logical data, and therefore cannot be chained together.
83 * We still generate an ereport, but the DE doesn't correlate it with any
84 * logical piece of data.  When such an I/O fails, the delegated I/O requests
85 * will issue a retry, which will trigger the 'real' ereport with the correct
86 * ENA.
87 *
88 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
89 * When a new logical I/O is issued, we set this to point to itself.  Child I/Os
90 * then inherit this pointer, so that when it is first set subsequent failures
91 * will use the same ENA.  For vdev cache fill and queue aggregation I/O,
92 * this pointer is set to NULL, and no ereport will be generated (since it
93 * doesn't actually correspond to any particular device or piece of data,
94 * and the caller will always retry without caching or queueing anyway).
95 */
96void
97zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
98    uint64_t stateoroffset, uint64_t size)
99{
100#ifdef _KERNEL
101	char buf[1024];
102	struct sbuf sb;
103	struct timespec ts;
104	int error;
105
106	/*
107	 * If we are doing a spa_tryimport(), ignore errors.
108	 */
109	if (spa->spa_load_state == SPA_LOAD_TRYIMPORT)
110		return;
111
112	/*
113	 * If we are in the middle of opening a pool, and the previous attempt
114	 * failed, don't bother logging any new ereports - we're just going to
115	 * get the same diagnosis anyway.
116	 */
117	if (spa->spa_load_state != SPA_LOAD_NONE &&
118	    spa->spa_last_open_failed)
119		return;
120
121	if (zio != NULL) {
122		/*
123		 * If this is not a read or write zio, ignore the error.  This
124		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
125		 */
126		if (zio->io_type != ZIO_TYPE_READ &&
127		    zio->io_type != ZIO_TYPE_WRITE)
128			return;
129
130		/*
131		 * Ignore any errors from speculative I/Os, as failure is an
132		 * expected result.
133		 */
134		if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
135			return;
136
137		/*
138		 * If this I/O is not a retry I/O, don't post an ereport.
139		 * Otherwise, we risk making bad diagnoses based on B_FAILFAST
140		 * I/Os.
141		 */
142		if (zio->io_error == EIO &&
143		    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
144			return;
145
146		if (vd != NULL) {
147			/*
148			 * If the vdev has already been marked as failing due
149			 * to a failed probe, then ignore any subsequent I/O
150			 * errors, as the DE will automatically fault the vdev
151			 * on the first such failure.  This also catches cases
152			 * where vdev_remove_wanted is set and the device has
153			 * not yet been asynchronously placed into the REMOVED
154			 * state.
155			 */
156			if (zio->io_vd == vd &&
157			    !vdev_accessible(vd, zio) &&
158			    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) != 0)
159				return;
160
161			/*
162			 * Ignore checksum errors for reads from DTL regions of
163			 * leaf vdevs.
164			 */
165			if (zio->io_type == ZIO_TYPE_READ &&
166			    zio->io_error == ECKSUM &&
167			    vd->vdev_ops->vdev_op_leaf &&
168			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
169				return;
170		}
171	}
172	nanotime(&ts);
173
174	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
175	sbuf_printf(&sb, "time=%ju.%ld", (uintmax_t)ts.tv_sec, ts.tv_nsec);
176
177	/*
178	 * Serialize ereport generation
179	 */
180	mutex_enter(&spa->spa_errlist_lock);
181
182#if 0
183	/*
184	 * Determine the ENA to use for this event.  If we are in a loading
185	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state, use
186	 * a root zio-wide ENA.  Otherwise, simply use a unique ENA.
187	 */
188	if (spa->spa_load_state != SPA_LOAD_NONE) {
189#if 0
190		if (spa->spa_ena == 0)
191			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
192#endif
193		ena = spa->spa_ena;
194	} else if (zio != NULL && zio->io_logical != NULL) {
195#if 0
196		if (zio->io_logical->io_ena == 0)
197			zio->io_logical->io_ena =
198			    fm_ena_generate(0, FM_ENA_FMT1);
199#endif
200		ena = zio->io_logical->io_ena;
201	} else {
202#if 0
203		ena = fm_ena_generate(0, FM_ENA_FMT1);
204#else
205		ena = 0;
206#endif
207	}
208#endif
209
210	/*
211	 * Construct the full class, detector, and other standard FMA fields.
212	 */
213	sbuf_printf(&sb, " ereport_version=%u", FM_EREPORT_VERSION);
214	sbuf_printf(&sb, " class=%s.%s", ZFS_ERROR_CLASS, subclass);
215
216	sbuf_printf(&sb, " zfs_scheme_version=%u", FM_ZFS_SCHEME_VERSION);
217
218	/*
219	 * Construct the per-ereport payload, depending on which parameters are
220	 * passed in.
221	 */
222
223	/*
224	 * Generic payload members common to all ereports.
225	 */
226	sbuf_printf(&sb, " %s=%s", FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa));
227	sbuf_printf(&sb, " %s=%ju", FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
228	    spa_guid(spa));
229	sbuf_printf(&sb, " %s=%d", FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT,
230	    spa->spa_load_state);
231
232	if (spa != NULL) {
233		sbuf_printf(&sb, " %s=%s", FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
234		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
235		    FM_EREPORT_FAILMODE_WAIT :
236		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
237		    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC);
238	}
239
240	if (vd != NULL) {
241		vdev_t *pvd = vd->vdev_parent;
242
243		sbuf_printf(&sb, " %s=%ju", FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
244		    vd->vdev_guid);
245		sbuf_printf(&sb, " %s=%s", FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
246		    vd->vdev_ops->vdev_op_type);
247		if (vd->vdev_path != NULL)
248			sbuf_printf(&sb, " %s=%s",
249			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path);
250		if (vd->vdev_devid != NULL)
251			sbuf_printf(&sb, " %s=%s",
252			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid);
253		if (vd->vdev_fru != NULL)
254			sbuf_printf(&sb, " %s=%s",
255			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru);
256
257		if (pvd != NULL) {
258			sbuf_printf(&sb, " %s=%ju",
259			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, pvd->vdev_guid);
260			sbuf_printf(&sb, " %s=%s",
261			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
262			    pvd->vdev_ops->vdev_op_type);
263			if (pvd->vdev_path)
264				sbuf_printf(&sb, " %s=%s",
265				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
266				    pvd->vdev_path);
267			if (pvd->vdev_devid)
268				sbuf_printf(&sb, " %s=%s",
269				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
270				    pvd->vdev_devid);
271		}
272	}
273
274	if (zio != NULL) {
275		/*
276		 * Payload common to all I/Os.
277		 */
278		sbuf_printf(&sb, " %s=%u", FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
279		    zio->io_error);
280
281		/*
282		 * If the 'size' parameter is non-zero, it indicates this is a
283		 * RAID-Z or other I/O where the physical offset and length are
284		 * provided for us, instead of within the zio_t.
285		 */
286		if (vd != NULL) {
287			if (size) {
288				sbuf_printf(&sb, " %s=%ju",
289				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
290				    stateoroffset);
291				sbuf_printf(&sb, " %s=%ju",
292				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE, size);
293			} else {
294				sbuf_printf(&sb, " %s=%ju",
295				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
296				    zio->io_offset);
297				sbuf_printf(&sb, " %s=%ju",
298				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
299				    zio->io_size);
300			}
301		}
302
303		/*
304		 * Payload for I/Os with corresponding logical information.
305		 */
306		if (zio->io_logical != NULL) {
307			sbuf_printf(&sb, " %s=%ju",
308			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
309			    zio->io_logical->io_bookmark.zb_object);
310			sbuf_printf(&sb, " %s=%ju",
311			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
312			    zio->io_logical->io_bookmark.zb_level);
313			sbuf_printf(&sb, " %s=%ju",
314			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
315			    zio->io_logical->io_bookmark.zb_blkid);
316		}
317	} else if (vd != NULL) {
318		/*
319		 * If we have a vdev but no zio, this is a device fault, and the
320		 * 'stateoroffset' parameter indicates the previous state of the
321		 * vdev.
322		 */
323		sbuf_printf(&sb, " %s=%ju", FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
324		    stateoroffset);
325	}
326	mutex_exit(&spa->spa_errlist_lock);
327
328	error = sbuf_finish(&sb);
329	devctl_notify("ZFS", spa->spa_name, subclass, sbuf_data(&sb));
330	if (error != 0)
331		printf("ZFS WARNING: sbuf overflowed\n");
332	sbuf_delete(&sb);
333#endif
334}
335
336static void
337zfs_post_common(spa_t *spa, vdev_t *vd, const char *name)
338{
339#ifdef _KERNEL
340	char buf[1024];
341	char class[64];
342	struct sbuf sb;
343	struct timespec ts;
344	int error;
345
346	nanotime(&ts);
347
348	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
349	sbuf_printf(&sb, "time=%ju.%ld", (uintmax_t)ts.tv_sec, ts.tv_nsec);
350
351	snprintf(class, sizeof(class), "%s.%s.%s", FM_RSRC_RESOURCE,
352	    ZFS_ERROR_CLASS, name);
353	sbuf_printf(&sb, " %s=%hhu", FM_VERSION, FM_RSRC_VERSION);
354	sbuf_printf(&sb, " %s=%s", FM_CLASS, class);
355	sbuf_printf(&sb, " %s=%ju", FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
356	    spa_guid(spa));
357	if (vd)
358		sbuf_printf(&sb, " %s=%ju", FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
359		    vd->vdev_guid);
360	error = sbuf_finish(&sb);
361	ZFS_LOG(1, "%s", sbuf_data(&sb));
362	devctl_notify("ZFS", spa->spa_name, class, sbuf_data(&sb));
363	if (error != 0)
364		printf("ZFS WARNING: sbuf overflowed\n");
365	sbuf_delete(&sb);
366#endif
367}
368
369/*
370 * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
371 * has been removed from the system.  This will cause the DE to ignore any
372 * recent I/O errors, inferring that they are due to the asynchronous device
373 * removal.
374 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
	/* Post a 'resource.fs.zfs.removed' event for this vdev (see above). */
	zfs_post_common(spa, vd, FM_RESOURCE_REMOVED);
}
380
381/*
382 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
383 * has the 'autoreplace' property set, and therefore any broken vdevs will be
384 * handled by higher level logic, and no vdev fault should be generated.
385 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
	/* Post a 'resource.fs.zfs.autoreplace' event for this vdev (see above). */
	zfs_post_common(spa, vd, FM_RESOURCE_AUTOREPLACE);
}
391