/*-
 * Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 */

/**
 * \file zfsd_event.cc
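 *
 * Implementation of the GeomEvent and ZfsEvent classes, through which
 * zfsd reacts to devd notifications about GEOM devices and ZFS state
 * changes.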
 */
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>

/* For open(2), close(2), fdevname(3), and _PATH_DEV, used below. */
#include <fcntl.h>
#include <libutil.h>
#include <paths.h>
#include <syslog.h>
#include <unistd.h>

#include <libzfs.h>
/*
 * Undefine flush, defined by cpufunc.h on sparc64, because it conflicts with
 * C++ flush methods
 */
#undef   flush

#include <list>
#include <map>
#include <sstream>
#include <string>

#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>

#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd_event.h"
#include "case_file.h"
#include "vdev.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"

__FBSDID("$FreeBSD$");
/*============================ Namespace Control =============================*/
using DevdCtl::Event;
using DevdCtl::Guid;
using DevdCtl::NVPairMap;
using std::string;
using std::stringstream;
/*=========================== Class Implementations ==========================*/

/*-------------------------------- GeomEvent --------------------------------*/

//- GeomEvent Static Public Methods -------------------------------------------
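/*
 * Builder for GeomEvent objects.  The signature is intended for
 * registration with the DevdCtl::EventFactory included above, so that
 * GEOM devd events are materialized as GeomEvents rather than as
 * generic Events.
 */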
Event *
GeomEvent::Builder(Event::Type type,
		   NVPairMap &nvPairs,
		   const string &eventString)
{
	return (new GeomEvent(type, nvPairs, eventString));
}

//- GeomEvent Virtual Public Methods ------------------------------------------
Event *
GeomEvent::DeepCopy() const
{
	return (new GeomEvent(*this));
}

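/*
 * Evaluate a GEOM event for any action to take on behalf of open case
 * files.  GEOM events are always fully handled or discarded here: every
 * path returns false, so the event is never queued for replay.
 */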
bool
GeomEvent::Process() const
{
	/*
	 * We only use GEOM events to repair damaged pools, so return early
	 * if there are no damaged pools.
	 */
	if (CaseFile::Empty())
		return (false);

	/*
	 * We are only concerned with arrivals and physical path changes,
	 * because those can be used to satisfy online and autoreplace
	 * operations.
	 */
	if (Value("type") != "GEOM::physpath" && Value("type") != "CREATE")
		return (false);

	/* Log the event since it is of interest. */
	Log(LOG_INFO);

	string devPath;
	if (!DevPath(devPath))
		return (false);

	int devFd(open(devPath.c_str(), O_RDONLY));
	if (devFd == -1)
		return (false);

	bool inUse;
	bool degraded;
	nvlist_t *devLabel(ReadLabel(devFd, inUse, degraded));

	string physPath;
	bool havePhysPath(PhysicalPath(physPath));

	string devName;
	DevName(devName);
	close(devFd);

	if (inUse && devLabel != NULL) {
		OnlineByLabel(devPath, physPath, devLabel);
	} else if (degraded) {
		syslog(LOG_INFO, "%s is marked degraded.  Ignoring "
		       "as a replace by physical path candidate.\n",
		       devName.c_str());
	} else if (havePhysPath) {
		/*
		 * TODO: attempt to resolve events using every casefile
		 * that matches this physpath.
		 */
		CaseFile *caseFile(CaseFile::Find(physPath));
		if (caseFile != NULL) {
			syslog(LOG_INFO,
			       "Found CaseFile(%s:%s:%s) - ReEvaluating\n",
			       caseFile->PoolGUIDString().c_str(),
			       caseFile->VdevGUIDString().c_str(),
			       zpool_state_to_name(caseFile->VdevState(),
						   VDEV_AUX_NONE));
			caseFile->ReEvaluate(devPath, physPath, /*vdev*/NULL);
		}
	}
	return (false);
}

//- GeomEvent Protected Methods -----------------------------------------------
GeomEvent::GeomEvent(Event::Type type, NVPairMap &nvpairs,
		     const string &eventString)
 : DevdCtl::GeomEvent(type, nvpairs, eventString)
{
}

GeomEvent::GeomEvent(const GeomEvent &src)
 : DevdCtl::GeomEvent(src)
{
}

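/*
 * Read the ZFS label, if any, from the supplied device file descriptor.
 * On success, returns the label nvlist (which the caller must free) and
 * sets inUse and degraded from its contents.  Returns NULL if the device
 * lacks a complete set of ZFS labels or the label cannot be interpreted.
 */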
nvlist_t *
GeomEvent::ReadLabel(int devFd, bool &inUse, bool &degraded)
{
	pool_state_t poolState;
	char        *poolName;
	boolean_t    b_inuse;
	int          nlabels;

	inUse    = false;
	degraded = false;
	poolName = NULL;
	if (zpool_in_use(g_zfsHandle, devFd, &poolState,
			 &poolName, &b_inuse) == 0) {
		nvlist_t *devLabel = NULL;

		inUse = b_inuse == B_TRUE;
		if (poolName != NULL)
			free(poolName);

		nlabels = zpool_read_all_labels(devFd, &devLabel);
		/*
		 * If we find a disk with fewer than the maximum number of
		 * labels, it might be the whole-disk device of a partitioned
		 * disk on which ZFS resides in a partition.  In that case, we
		 * should do nothing and wait for the partition to appear.
		 * Or, the disk might be damaged.  In that case, zfsd should
		 * do nothing and wait for the sysadmin to decide.
		 */
		if (nlabels != VDEV_LABELS || devLabel == NULL) {
			nvlist_free(devLabel);
			return (NULL);
		}

		try {
			Vdev vdev(devLabel);
			degraded = vdev.State() != VDEV_STATE_HEALTHY;
			return (devLabel);
		} catch (ZfsdException &exp) {
			string devName = fdevname(devFd);
			string devPath = _PATH_DEV + devName;
			string context("GeomEvent::ReadLabel: "
				     + devPath + ": ");

			exp.GetString().insert(0, context);
			exp.Log();
			nvlist_free(devLabel);
		}
	}
	return (NULL);
}

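/*
 * Use the vdev label of a newly arrived device to locate and re-evaluate
 * a matching case file.  Returns the result of CaseFile::ReEvaluate, or
 * false if no case matches or the label cannot be interpreted.
 */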
bool
GeomEvent::OnlineByLabel(const string &devPath, const string &physPath,
			 nvlist_t *devConfig)
{
	try {
		/*
		 * A device with ZFS label information has been
		 * inserted.  If it matches a device for which we
		 * have a case, see if we can solve that case.
		 */
		syslog(LOG_INFO, "Interrogating VDEV label for %s\n",
		       devPath.c_str());
		Vdev vdev(devConfig);
		CaseFile *caseFile(CaseFile::Find(vdev.PoolGUID(),
						  vdev.GUID()));
		if (caseFile != NULL)
			return (caseFile->ReEvaluate(devPath, physPath, &vdev));

	} catch (ZfsdException &exp) {
		string context("GeomEvent::OnlineByLabel: " + devPath + ": ");

		exp.GetString().insert(0, context);
		exp.Log();
	}
	return (false);
}


/*--------------------------------- ZfsEvent ---------------------------------*/
//- ZfsEvent Static Public Methods ---------------------------------------------
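/*
 * Builder for ZfsEvent objects.  Like GeomEvent::Builder, the signature
 * is intended for registration with the DevdCtl::EventFactory.
 */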
DevdCtl::Event *
ZfsEvent::Builder(Event::Type type, NVPairMap &nvpairs,
		  const string &eventString)
{
	return (new ZfsEvent(type, nvpairs, eventString));
}

//- ZfsEvent Virtual Public Methods --------------------------------------------
Event *
ZfsEvent::DeepCopy() const
{
	return (new ZfsEvent(*this));
}

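/*
 * Process a ZFS event.  Returns true when the event could not be
 * consumed (e.g. it refers to a pool or vdev that is not currently
 * visible) and should be queued for later replay, false otherwise.
 */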
bool
ZfsEvent::Process() const
{
	if (!Contains("class") && !Contains("type")) {
		syslog(LOG_ERR,
		       "ZfsEvent::Process: Missing class or type data.");
		return (false);
	}

	/* On config syncs, replay any queued events first. */
	if (Value("type").find("misc.fs.zfs.config_sync") == 0) {
		/*
		 * Even if saved events are unconsumed the second time
		 * around, drop them.  Any events that still can't be
		 * consumed are probably referring to vdevs or pools that
		 * no longer exist.
		 */
		ZfsDaemon::Get().ReplayUnconsumedEvents(/*discard*/true);
		CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
	}

	if (Value("type").find("misc.fs.zfs.") == 0) {
		/* Configuration changes, resilver events, etc. */
		ProcessPoolEvent();
		return (false);
	}

	if (!Contains("pool_guid") || !Contains("vdev_guid")) {
		/* Only currently interested in Vdev related events. */
		return (false);
	}

	CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
	if (caseFile != NULL) {
		Log(LOG_INFO);
		syslog(LOG_INFO, "Evaluating existing case file\n");
		caseFile->ReEvaluate(*this);
		return (false);
	}

	/* Skip events that can't be handled. */
	Guid poolGUID(PoolGUID());
	/* If there are no replicas for a pool, then it's not manageable. */
	if (Value("class").find("fs.zfs.vdev.no_replicas") == 0) {
		stringstream msg;
		msg << "No replicas available for pool " << poolGUID;
		msg << ", ignoring";
		Log(LOG_INFO);
		syslog(LOG_INFO, "%s", msg.str().c_str());
		return (false);
	}

	/*
	 * Create a case file for this vdev, and have it
	 * evaluate the event.
	 */
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
	if (zpl.empty()) {
		stringstream msg;
		int priority = LOG_INFO;
		msg << "ZfsEvent::Process: Event for unknown pool ";
		msg << poolGUID << " ";
		msg << "queued";
		Log(LOG_INFO);
		syslog(priority, "%s", msg.str().c_str());
		return (true);
	}

	nvlist_t *vdevConfig = VdevIterator(zpl.front()).Find(VdevGUID());
	if (vdevConfig == NULL) {
		stringstream msg;
		int priority = LOG_INFO;
		msg << "ZfsEvent::Process: Event for unknown vdev ";
		msg << VdevGUID() << " ";
		msg << "queued";
		Log(LOG_INFO);
		syslog(priority, "%s", msg.str().c_str());
		return (true);
	}

	Vdev vdev(zpl.front(), vdevConfig);
	caseFile = &CaseFile::Create(vdev);
	if (caseFile->ReEvaluate(*this) == false) {
		stringstream msg;
		int priority = LOG_INFO;
		msg << "ZfsEvent::Process: Unconsumed event for vdev(";
		msg << zpool_get_name(zpl.front()) << ",";
		msg << vdev.GUID() << ") ";
		msg << "queued";
		Log(LOG_INFO);
		syslog(priority, "%s", msg.str().c_str());
		return (true);
	}
	return (false);
}

//- ZfsEvent Protected Methods -------------------------------------------------
ZfsEvent::ZfsEvent(Event::Type type, NVPairMap &nvpairs,
		   const string &eventString)
 : DevdCtl::ZfsEvent(type, nvpairs, eventString)
{
}

ZfsEvent::ZfsEvent(const ZfsEvent &src)
 : DevdCtl::ZfsEvent(src)
{
}

/*
 * Sometimes the kernel won't detach a spare when it is no longer needed.
 * This can happen, for example, if a drive is removed, then either the pool
 * is exported or the machine is powered off, then the drive is reinserted,
 * then the machine is powered on or the pool is imported.  ZFSD must detach
 * these spares itself.
 */
void
ZfsEvent::CleanupSpares() const
{
	Guid poolGUID(PoolGUID());
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
	if (!zpl.empty()) {
		zpool_handle_t* hdl;

		hdl = zpl.front();
		VdevIterator(hdl).Each(TryDetach, (void*)hdl);
	}
}

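/*
 * Handle events in the misc.fs.zfs namespace: discard cases for
 * destroyed pools, clean up spares when a resilver completes, and
 * request a rescan when a vdev is removed without leaving a degraded
 * device behind.
 */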
void
ZfsEvent::ProcessPoolEvent() const
{
	bool degradedDevice(false);

	/* The pool has been destroyed.  Discard any open cases. */
	if (Value("type") == "misc.fs.zfs.pool_destroy") {
		Log(LOG_INFO);
		CaseFile::ReEvaluateByGuid(PoolGUID(), *this);
		return;
	}

	CaseFile *caseFile(CaseFile::Find(PoolGUID(), VdevGUID()));
	if (caseFile != NULL) {
		if (caseFile->VdevState() != VDEV_STATE_UNKNOWN
		 && caseFile->VdevState() < VDEV_STATE_HEALTHY)
			degradedDevice = true;

		Log(LOG_INFO);
		caseFile->ReEvaluate(*this);
	} else if (Value("type") == "misc.fs.zfs.resilver_finish") {
		/*
		 * It's possible to get a resilver_finish event with no
		 * corresponding casefile.  For example, if a damaged pool were
		 * exported, repaired, then reimported.
		 */
		Log(LOG_INFO);
		CleanupSpares();
	}

	if (Value("type") == "misc.fs.zfs.vdev_remove"
	 && degradedDevice == false) {
		/* See if any other cases can make use of this device. */
		Log(LOG_INFO);
		ZfsDaemon::RequestSystemRescan();
	}
}

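/*
 * VdevIterator callback, invoked by CleanupSpares for each vdev in a
 * pool, that detaches an active spare whose parent already has a
 * healthy, non-spare child.
 */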
bool
ZfsEvent::TryDetach(Vdev &vdev, void *cbArg)
{
	/*
	 * Outline:
	 * if this device is a spare, and its parent includes one healthy,
	 * non-spare child, then detach this device.
	 */
	zpool_handle_t *hdl(static_cast<zpool_handle_t*>(cbArg));

	if (vdev.IsSpare()) {
		std::list<Vdev> siblings;
		std::list<Vdev>::iterator siblings_it;
		boolean_t cleanup = B_FALSE;

		Vdev parent = vdev.Parent();
		siblings = parent.Children();

		/* Determine whether the parent should be cleaned up */
		for (siblings_it = siblings.begin();
		     siblings_it != siblings.end();
		     siblings_it++) {
			Vdev sibling = *siblings_it;

			if (!sibling.IsSpare() &&
			     sibling.State() == VDEV_STATE_HEALTHY) {
				cleanup = B_TRUE;
				break;
			}
		}

		if (cleanup) {
			syslog(LOG_INFO, "Detaching spare vdev %s from pool %s",
			       vdev.Path().c_str(), zpool_get_name(hdl));
			zpool_vdev_detach(hdl, vdev.Path().c_str());
		}
	}

	/* Always return false, because there may be other spares to detach */
	return (false);
}