vdev_geom.c revision 203504
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

typedef struct vdev_geom_ctx {
	struct g_consumer *gc_consumer;
	int gc_state;
	struct bio_queue_head gc_queue;
	struct mtx gc_queue_mtx;
} vdev_geom_ctx_t;

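/*
 * Tear down the per-vdev I/O context: signal the worker thread to exit,
 * wait for it to acknowledge, then destroy the queue mutex and free the
 * context.
 */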
static void
vdev_geom_release(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;

	ctx = vd->vdev_tsd;
	vd->vdev_tsd = NULL;

	mtx_lock(&ctx->gc_queue_mtx);
	ctx->gc_state = 1;
	wakeup_one(&ctx->gc_queue);
	while (ctx->gc_state != 2)
		msleep(&ctx->gc_state, &ctx->gc_queue_mtx, 0, "vgeom:w", 0);
	mtx_unlock(&ctx->gc_queue_mtx);
	mtx_destroy(&ctx->gc_queue_mtx);
	kmem_free(ctx, sizeof(*ctx));
}

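/*
 * GEOM orphan callback, invoked when the underlying provider disappears:
 * drop our access counts, destroy the consumer (and the geom once it has
 * no consumers left), stop the worker thread and ask the SPA to remove
 * the vdev.
 */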
static void
vdev_geom_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	vdev_t *vd;
	int error;

	g_topology_assert();

	vd = cp->private;
	gp = cp->geom;
	error = cp->provider->error;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	if (cp->acr + cp->acw + cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, error);
	}
	vdev_geom_release(vd);

	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}

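/*
 * Attach a consumer to the given provider, creating the shared
 * "zfs::vdev" geom on first use and reusing an existing consumer when we
 * are already connected to this provider.  On success the consumer holds
 * r1w0e1 access; returns NULL on failure.
 */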
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we have the geom already?  If not, create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		cp = g_new_consumer(gp);
		if (g_attach(cp, pp) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		if (g_access(cp, 1, 0, 1) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			if (g_attach(cp, pp) != 0) {
				g_destroy_consumer(cp);
				return (NULL);
			}
			if (g_access(cp, 1, 0, 1) != 0) {
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			if (g_access(cp, 1, 0, 1) != 0)
				return (NULL);
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}
	return (cp);
}

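/*
 * Counterpart to vdev_geom_attach(), run as a GEOM event: drop our
 * access and destroy the consumer on last close, withering the geom once
 * no consumers remain.
 */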
static void
vdev_geom_detach(void *arg, int flag __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	gp = cp->geom;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

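/*
 * Per-vdev worker thread.  Bios completed by GEOM are queued to this
 * thread by vdev_geom_io_intr(); it records a persistent "no write
 * cache" flag when a BIO_FLUSH fails with ENOTSUP, destroys the bio and
 * finishes the zio with zio_interrupt().  The thread exits when
 * vdev_geom_release() requests it.
 */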
static void
vdev_geom_worker(void *arg)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;
	struct bio *bp;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	ctx = arg;
	for (;;) {
		mtx_lock(&ctx->gc_queue_mtx);
		bp = bioq_takefirst(&ctx->gc_queue);
		if (bp == NULL) {
			if (ctx->gc_state == 1) {
				ctx->gc_state = 2;
				wakeup_one(&ctx->gc_state);
				mtx_unlock(&ctx->gc_queue_mtx);
				kthread_exit();
			}
			msleep(&ctx->gc_queue, &ctx->gc_queue_mtx,
			    PRIBIO | PDROP, "vgeom:io", 0);
			continue;
		}
		mtx_unlock(&ctx->gc_queue_mtx);
		zio = bp->bio_caller1;
		zio->io_error = bp->bio_error;
		if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
			vdev_t *vd;

			/*
			 * If we get ENOTSUP, we know that no future
			 * attempts will ever succeed.  In this case we
			 * set a persistent bit so that we don't bother
			 * with the ioctl in the future.
			 */
			vd = zio->io_vd;
			vd->vdev_nowritecache = B_TRUE;
		}
		g_destroy_bio(bp);
		zio_interrupt(zio);
	}
}

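/*
 * Return the value of the "guid" element of an unpacked label nvlist, or
 * 0 if it is not present.
 */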
static uint64_t
nvlist_get_guid(nvlist_t *list)
{
	nvpair_t *elem = NULL;
	uint64_t value;

	while ((elem = nvlist_next_nvpair(list, elem)) != NULL) {
		if (nvpair_type(elem) == DATA_TYPE_UINT64 &&
		    strcmp(nvpair_name(elem), "guid") == 0) {
			VERIFY(nvpair_value_uint64(elem, &value) == 0);
			return (value);
		}
	}
	return (0);
}

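/*
 * Synchronous I/O helper for label reads: splits the request into
 * MAXPHYS-sized bios and waits for each one.  Offset and size must be
 * multiples of the provider's sector size.
 */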
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	error = 0;

	for (; off < offset; off += MAXPHYS, p += MAXPHYS, size -= MAXPHYS) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_done = NULL;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, MAXPHYS);
		bp->bio_data = p;
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}

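/*
 * Read the vdev guid from the provider's on-disk labels, skipping
 * ZVOL-backed providers.  Each label is tried in turn until one yields a
 * non-zero guid; 0 is returned if none does.
 */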
static uint64_t
vdev_geom_read_guid(struct g_consumer *cp)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *p, *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	uint64_t guid;
	int error, l, len, iszvol;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading guid from %s...", pp->name);
	if (g_getattr("ZFS::iszvol", cp, &iszvol) == 0 && iszvol) {
		ZFS_LOG(1, "Skipping ZVOL-based provider %s.", pp->name);
		return (0);
	}

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	guid = 0;
	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	for (l = 0; l < VDEV_LABELS; l++) {
		nvlist_t *config = NULL;

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, &config, 0) != 0)
			continue;

		guid = nvlist_get_guid(config);
		nvlist_free(config);
		if (guid != 0)
			break;
	}

	kmem_free(label, size);
	if (guid != 0)
		ZFS_LOG(1, "guid for %s is %ju", pp->name, (uintmax_t)guid);
	return (guid);
}

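/* Argument/result block for the attach-by-guid taste event below. */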
struct vdev_geom_find {
	uint64_t guid;
	struct g_consumer *cp;
};

static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}

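/*
 * GEOM event: walk every provider of every class (except our own),
 * attach a temporary tasting consumer, read the label guid and, on a
 * match with ap->guid, attach to that provider for real.
 */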
static void
vdev_geom_attach_by_guid_event(void *arg, int flags __unused)
{
	struct vdev_geom_find *ap;
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	uint64_t guid;

	g_topology_assert();

	ap = arg;

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				g_attach(zcp, pp);
				if (g_access(zcp, 1, 0, 0) != 0) {
					g_detach(zcp);
					continue;
				}
				g_topology_unlock();
				guid = vdev_geom_read_guid(zcp);
				g_topology_lock();
				g_access(zcp, -1, 0, 0);
				g_detach(zcp);
				if (guid != ap->guid)
					continue;
				ap->cp = vdev_geom_attach(pp);
				if (ap->cp == NULL) {
					printf("ZFS WARNING: Unable to attach to %s.",
					    pp->name);
					continue;
				}
				goto end;
			}
		}
	}
	ap->cp = NULL;
end:
	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
}

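/*
 * Post the taste event to the GEOM event queue and wait for its result.
 */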
static struct g_consumer *
vdev_geom_attach_by_guid(uint64_t guid)
{
	struct vdev_geom_find *ap;
	struct g_consumer *cp;

	ap = kmem_zalloc(sizeof(*ap), KM_SLEEP);
	ap->guid = guid;
	g_waitfor_event(vdev_geom_attach_by_guid_event, ap, M_WAITOK, NULL);
	cp = ap->cp;
	kmem_free(ap, sizeof(*ap));
	return (cp);
}

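/*
 * Locate the vdev by guid alone and, if found, point vd->vdev_path at
 * the provider we actually attached to.
 */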
static struct g_consumer *
vdev_geom_open_by_guid(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guid(vd->vdev_guid);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
	} else {
		ZFS_LOG(1, "Search by guid [%ju] failed.",
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}

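/*
 * Open the provider named by vd->vdev_path, optionally verifying that
 * its label guid matches vd->vdev_guid.
 */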
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	uint64_t guid;

	cp = NULL;
	g_topology_lock();
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp);
		if (cp != NULL && check_guid) {
			g_topology_unlock();
			guid = vdev_geom_read_guid(cp);
			g_topology_lock();
			if (guid != vd->vdev_guid) {
				vdev_geom_detach(cp, 0);
				cp = NULL;
				ZFS_LOG(1, "guid mismatch for provider %s: "
				    "%ju != %ju.", vd->vdev_path,
				    (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
			} else {
				ZFS_LOG(1, "guid match for provider %s.",
				    vd->vdev_path);
			}
		}
	}
	g_topology_unlock();

	return (cp);
}

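/*
 * Vdev open method: try the path with guid verification first, then a
 * guid-only search of all providers, then the bare path.  On success,
 * upgrade to write access if the pool is opened for writing and start
 * the per-vdev worker thread.
 */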
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_geom_ctx_t *ctx;
	struct g_provider *pp;
	struct g_consumer *cp;
	int error, owned;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vd->vdev_tsd = NULL;

	if ((owned = mtx_owned(&Giant)))
		mtx_unlock(&Giant);
	error = 0;
	cp = vdev_geom_open_by_path(vd, 1);
	if (cp == NULL) {
		/*
		 * The device at vd->vdev_path doesn't have the expected guid.
		 * The disks might have merely moved around so try all other
		 * geom providers to find one with the right guid.
		 */
		cp = vdev_geom_open_by_guid(vd);
	}
	if (cp == NULL)
		cp = vdev_geom_open_by_path(vd, 0);
	if (cp == NULL) {
		ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
		error = ENOENT;
	} else if (cp->acw == 0 && (spa_mode & FWRITE) != 0) {
		g_topology_lock();
		error = g_access(cp, 0, 1, 0);
		if (error != 0) {
			printf("ZFS WARNING: Unable to open %s for writing (error=%d).",
			    vd->vdev_path, error);
			vdev_geom_detach(cp, 0);
			cp = NULL;
		}
		g_topology_unlock();
	}
	if (owned)
		mtx_lock(&Giant);
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	cp->private = vd;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP);
	bioq_init(&ctx->gc_queue);
	mtx_init(&ctx->gc_queue_mtx, "zfs:vdev:geom:queue", NULL, MTX_DEF);
	ctx->gc_consumer = cp;
	ctx->gc_state = 0;

	vd->vdev_tsd = ctx;
	pp = cp->provider;

	kproc_kthread_add(vdev_geom_worker, ctx, &zfsproc, NULL, 0, 0,
	    "zfskern", "vdev %s", pp->name);

	/*
	 * Determine the actual size of the device.
	 */
	*psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size.
	 */
	*ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	return (0);
}

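/*
 * Vdev close method: stop the worker thread and queue an event to drop
 * our access to the consumer.
 */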
static void
vdev_geom_close(vdev_t *vd)
{
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;

	if ((ctx = vd->vdev_tsd) == NULL)
		return;
	if ((cp = ctx->gc_consumer) == NULL)
		return;
	vdev_geom_release(vd);
	g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}

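/*
 * GEOM done routine: record the bio's error (a short transfer counts as
 * EIO) and pass the bio to the worker thread for final processing.
 */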
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_geom_ctx_t *ctx;
	zio_t *zio;

	zio = bp->bio_caller1;
	ctx = zio->io_vd->vdev_tsd;

	if ((zio->io_error = bp->bio_error) == 0 && bp->bio_resid != 0)
		zio->io_error = EIO;

	mtx_lock(&ctx->gc_queue_mtx);
	bioq_insert_tail(&ctx->gc_queue, bp);
	wakeup_one(&ctx->gc_queue);
	mtx_unlock(&ctx->gc_queue_mtx);
}

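/*
 * Vdev I/O start method: translate the zio into a bio (BIO_READ,
 * BIO_WRITE, or BIO_FLUSH for the write-cache-flush ioctl) and hand it
 * to GEOM.  Returns ZIO_PIPELINE_STOP when a bio is in flight;
 * completion is reported asynchronously via vdev_geom_io_intr().
 */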
static int
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	vdev_geom_ctx_t *ctx;
	struct g_consumer *cp;
	struct bio *bp;
	int error;

	cp = NULL;

	vd = zio->io_vd;
	ctx = vd->vdev_tsd;
	if (ctx != NULL)
		cp = ctx->gc_consumer;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (vd->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			goto sendreq;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}
sendreq:
	if (cp == NULL) {
		zio->io_error = ENXIO;
		return (ZIO_PIPELINE_CONTINUE);
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);

	return (ZIO_PIPELINE_STOP);
}

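/*
 * Vdev I/O done method: intentionally empty; completion work happens in
 * the worker thread.
 */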
static void
vdev_geom_io_done(zio_t *zio)
{
}

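/*
 * Method table plugging this file into the generic vdev layer; the NULL
 * slot is (as far as this file is concerned) the optional state-change
 * callback, which is not implemented here.
 */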
vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};