/*-
 * Copyright (c) 2004, 2005 Lukas Ertl
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/vinum/geom_vinum_drive.c 143259 2005-03-07 19:58:58Z le $");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/systm.h>
#include <sys/time.h>

#include <geom/geom.h>
#include <geom/vinum/geom_vinum_var.h>
#include <geom/vinum/geom_vinum.h>
#include <geom/vinum/geom_vinum_share.h>

static void	gv_drive_worker(void *);
void	gv_drive_modify(struct gv_drive *);

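/*
 * Set up the in-memory state for a newly created drive: allocate and
 * initialize its vinum header, set up the subdisk and free space lists,
 * the BIO queue and its mutex, and start the per-drive worker thread.
 */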
void
gv_config_new_drive(struct gv_drive *d)
{
	struct gv_hdr *vhdr;
	struct gv_freelist *fl;

	KASSERT(d != NULL, ("config_new_drive: NULL d"));

	vhdr = g_malloc(sizeof(*vhdr), M_WAITOK | M_ZERO);
	vhdr->magic = GV_MAGIC;
	vhdr->config_length = GV_CFG_LEN;

	bcopy(hostname, vhdr->label.sysname, GV_HOSTNAME_LEN);
	strncpy(vhdr->label.name, d->name, GV_MAXDRIVENAME);
	microtime(&vhdr->label.date_of_birth);

	d->hdr = vhdr;

	LIST_INIT(&d->subdisks);
	LIST_INIT(&d->freelist);

	fl = g_malloc(sizeof(struct gv_freelist), M_WAITOK | M_ZERO);
	fl->offset = GV_DATA_START;
	fl->size = d->avail;
	LIST_INSERT_HEAD(&d->freelist, fl, freelist);
	d->freelist_entries = 1;

	TAILQ_INIT(&d->bqueue);
	mtx_init(&d->bqueue_mtx, "gv_drive", NULL, MTX_DEF);
	kthread_create(gv_drive_worker, d, NULL, 0, 0, "gv_d %s", d->name);
	d->flags |= GV_DRIVE_THREAD_ACTIVE;
}

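/* Save the vinum configuration to all drives that have a geom. */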
void
gv_save_config_all(struct gv_softc *sc)
{
	struct gv_drive *d;

	g_topology_assert();

	LIST_FOREACH(d, &sc->drives, drive) {
		if (d->geom == NULL)
			continue;
		gv_save_config(NULL, d, sc);
	}
}

/* Save the vinum configuration back to disk. */
void
gv_save_config(struct g_consumer *cp, struct gv_drive *d, struct gv_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp2;
	struct gv_hdr *vhdr, *hdr;
	struct sbuf *sb;
	int error;

	g_topology_assert();

	KASSERT(d != NULL, ("gv_save_config: null d"));
	KASSERT(sc != NULL, ("gv_save_config: null sc"));

	if (cp == NULL) {
		gp = d->geom;
		KASSERT(gp != NULL, ("gv_save_config: null gp"));
		cp2 = LIST_FIRST(&gp->consumer);
		KASSERT(cp2 != NULL, ("gv_save_config: null cp2"));
	} else
		cp2 = cp;

	vhdr = g_malloc(GV_HDR_LEN, M_WAITOK | M_ZERO);
	vhdr->magic = GV_MAGIC;
	vhdr->config_length = GV_CFG_LEN;

	hdr = d->hdr;
	if (hdr == NULL) {
		printf("gvinum: drive '%s' has no header\n", d->name);
		g_free(vhdr);
		return;
	}
	microtime(&hdr->label.last_update);
	bcopy(&hdr->label, &vhdr->label, sizeof(struct gv_label));

	sb = sbuf_new(NULL, NULL, GV_CFG_LEN, SBUF_FIXEDLEN);
	gv_format_config(sc, sb, 1, NULL);
	sbuf_finish(sb);

	error = g_access(cp2, 0, 1, 0);
	if (error) {
		printf("gvinum: g_access failed: %d\n", error);
		sbuf_delete(sb);
		g_free(vhdr);
		return;
	}
	g_topology_unlock();

	do {
		error = g_write_data(cp2, GV_HDR_OFFSET, vhdr, GV_HDR_LEN);
		if (error) {
			printf("gvinum: writing vhdr failed: %d\n", error);
			break;
		}

		error = g_write_data(cp2, GV_CFG_OFFSET, sbuf_data(sb),
		    GV_CFG_LEN);
		if (error) {
			printf("gvinum: writing first config copy failed: "
			    "%d\n", error);
			break;
		}

		error = g_write_data(cp2, GV_CFG_OFFSET + GV_CFG_LEN,
		    sbuf_data(sb), GV_CFG_LEN);
		if (error)
			printf("gvinum: writing second config copy failed: "
			    "%d\n", error);
	} while (0);

	g_topology_lock();
	g_access(cp2, 0, -1, 0);
	sbuf_delete(sb);
	g_free(vhdr);

	if (d->geom != NULL)
		gv_drive_modify(d);
}

/*
 * This resembles g_slice_access().  Access to a subdisk provider is denied
 * if an overlapping subdisk on the same drive is already open in a
 * conflicting way.
 */
static int
gv_drive_access(struct g_provider *pp, int dr, int dw, int de)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp2;
	struct gv_drive *d;
	struct gv_sd *s, *s2;
	int error;

	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	if (cp == NULL)
		return (0);

	d = gp->softc;

	s = pp->private;
	KASSERT(s != NULL, ("gv_drive_access: NULL s"));

	LIST_FOREACH(s2, &d->subdisks, from_drive) {
		if (s == s2)
			continue;
		if (s->drive_offset + s->size <= s2->drive_offset)
			continue;
		if (s2->drive_offset + s2->size <= s->drive_offset)
			continue;

		/* Overlap. */
		pp2 = s2->provider;
		KASSERT(pp2 != NULL, ("gv_drive_access: NULL pp2"));
		if ((pp->acw + dw) > 0 && pp2->ace > 0) {
			printf("gvinum: %s: write access denied, %s is open "
			    "exclusively\n", pp->name, pp2->name);
			return (EPERM);
		}
		if ((pp->ace + de) > 0 && pp2->acw > 0) {
			printf("gvinum: %s: exclusive access denied, %s is "
			    "open for writing\n", pp->name, pp2->name);
			return (EPERM);
		}
	}

#if 0
	/* On first open, grab an extra "exclusive" bit */
	if (cp->acr == 0 && cp->acw == 0 && cp->ace == 0)
		de++;
	/* ... and let go of it on last close */
	if ((cp->acr + dr) == 0 && (cp->acw + dw) == 0 && (cp->ace + de) == 1)
		de--;
#endif
	error = g_access(cp, dr, dw, de);
	if (error) {
		printf("gvinum: %s: g_access failed: %d\n", pp->name, error);
	}
	return (error);
}

static void
gv_drive_done(struct bio *bp)
{
	struct gv_drive *d;
	struct gv_bioq *bq;

	/* Put the BIO on the worker queue again. */
	d = bp->bio_from->geom->softc;
	bp->bio_cflags |= GV_BIO_DONE;
	bq = g_malloc(sizeof(*bq), M_NOWAIT | M_ZERO);
	if (bq == NULL) {
		/*
		 * We may not sleep here.  If the allocation fails, complete
		 * the request directly, skipping the worker's error handling.
		 */
		g_std_done(bp);
		return;
	}
	bq->bp = bp;
	mtx_lock(&d->bqueue_mtx);
	TAILQ_INSERT_TAIL(&d->bqueue, bq, queue);
	wakeup(d);
	mtx_unlock(&d->bqueue_mtx);
}
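/*
 * Handle I/O requests for a subdisk provider: reject unsupported commands
 * and requests for subdisks that are down or stale, then hand the BIO to
 * the drive's worker thread.
 */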
static void
gv_drive_start(struct bio *bp)
{
	struct gv_drive *d;
	struct gv_sd *s;
	struct gv_bioq *bq;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	s = bp->bio_to->private;
	if ((s->state == GV_SD_DOWN) || (s->state == GV_SD_STALE)) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	d = bp->bio_to->geom->softc;

	/*
	 * Put the BIO on the worker queue, where the worker thread will pick
	 * it up.
	 */
	bq = g_malloc(sizeof(*bq), M_NOWAIT | M_ZERO);
	if (bq == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bq->bp = bp;
	mtx_lock(&d->bqueue_mtx);
	TAILQ_INSERT_TAIL(&d->bqueue, bq, queue);
	wakeup(d);
	mtx_unlock(&d->bqueue_mtx);
}
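/*
 * Per-drive worker thread.  It takes BIOs off the drive's queue; new
 * requests are clipped to the subdisk, shifted by the subdisk's offset on
 * the drive and sent down to the disk, while completed requests are
 * delivered back up.  If a completed request carries an error, the drive
 * is set down and its geom is withered.
 */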
static void
gv_drive_worker(void *arg)
{
	struct bio *bp, *cbp;
	struct g_geom *gp;
	struct g_provider *pp;
	struct gv_drive *d;
	struct gv_sd *s;
	struct gv_bioq *bq, *bq2;
	int error;

	d = arg;

	mtx_lock(&d->bqueue_mtx);
	for (;;) {
		/* We were signaled to exit. */
		if (d->flags & GV_DRIVE_THREAD_DIE)
			break;

		/* Take the first BIO from our queue. */
		bq = TAILQ_FIRST(&d->bqueue);
		if (bq == NULL) {
			msleep(d, &d->bqueue_mtx, PRIBIO, "-", hz/10);
			continue;
		}
		TAILQ_REMOVE(&d->bqueue, bq, queue);
		mtx_unlock(&d->bqueue_mtx);

		bp = bq->bp;
		g_free(bq);
		pp = bp->bio_to;
		gp = pp->geom;

		/* Completed request. */
		if (bp->bio_cflags & GV_BIO_DONE) {
			error = bp->bio_error;

			/* Deliver the original request. */
			g_std_done(bp);

			/* The request had an error, we need to clean up. */
			if (error != 0) {
				g_topology_lock();
				gv_set_drive_state(d, GV_DRIVE_DOWN,
				    GV_SETSTATE_FORCE | GV_SETSTATE_CONFIG);
				g_wither_geom(d->geom, ENXIO);
				g_topology_unlock();
			}

		/* New request, needs to be sent downwards. */
		} else {
			s = pp->private;

			if ((s->state == GV_SD_DOWN) ||
			    (s->state == GV_SD_STALE)) {
				g_io_deliver(bp, ENXIO);
				mtx_lock(&d->bqueue_mtx);
				continue;
			}
			if (bp->bio_offset > s->size) {
				g_io_deliver(bp, EINVAL);
				mtx_lock(&d->bqueue_mtx);
				continue;
			}

			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				g_io_deliver(bp, ENOMEM);
				mtx_lock(&d->bqueue_mtx);
				continue;
			}
			if (cbp->bio_offset + cbp->bio_length > s->size)
				cbp->bio_length = s->size -
				    cbp->bio_offset;
			cbp->bio_done = gv_drive_done;
			cbp->bio_offset += s->drive_offset;
			g_io_request(cbp, LIST_FIRST(&gp->consumer));
		}

		mtx_lock(&d->bqueue_mtx);
	}

	TAILQ_FOREACH_SAFE(bq, &d->bqueue, queue, bq2) {
		TAILQ_REMOVE(&d->bqueue, bq, queue);
		mtx_unlock(&d->bqueue_mtx);
		bp = bq->bp;
		g_free(bq);
		if (bp->bio_cflags & GV_BIO_DONE)
			g_std_done(bp);
		else
			g_io_deliver(bp, ENXIO);
		mtx_lock(&d->bqueue_mtx);
	}
	mtx_unlock(&d->bqueue_mtx);
	d->flags |= GV_DRIVE_THREAD_DEAD;

	kthread_exit(ENXIO);
}
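/*
 * The underlying disk went away.  Drop our consumer, disconnect the
 * subdisks from their providers, stop the worker thread and set the drive
 * down before withering the geom.
 */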
static void
gv_drive_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct gv_drive *d;
	struct gv_sd *s;
	int error;

	g_topology_assert();
	gp = cp->geom;
	g_trace(G_T_TOPOLOGY, "gv_drive_orphan(%s)", gp->name);
	if (cp->acr != 0 || cp->acw != 0 || cp->ace != 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	error = cp->provider->error;
	if (error == 0)
		error = ENXIO;
	g_detach(cp);
	g_destroy_consumer(cp);
	if (!LIST_EMPTY(&gp->consumer))
		return;
	d = gp->softc;
	if (d != NULL) {
		printf("gvinum: lost drive '%s'\n", d->name);
		d->geom = NULL;
		LIST_FOREACH(s, &d->subdisks, from_drive) {
			s->provider = NULL;
			s->consumer = NULL;
		}
		gv_kill_drive_thread(d);
		gv_set_drive_state(d, GV_DRIVE_DOWN,
		    GV_SETSTATE_FORCE | GV_SETSTATE_CONFIG);
	}
	gp->softc = NULL;
	g_wither_geom(gp, error);
}
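/*
 * Taste a provider: read the vinum header and, if it is valid, parse the
 * on-disk configuration, create (or hook up) the corresponding drive,
 * start its worker thread and create providers for its subdisks.
 */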
static struct g_geom *
gv_drive_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_geom *gp, *gp2;
	struct g_consumer *cp;
	struct gv_drive *d;
	struct gv_sd *s;
	struct gv_softc *sc;
	struct gv_freelist *fl;
	struct gv_hdr *vhdr;
	int error;
	char *buf, errstr[ERRBUFSIZ];

	vhdr = NULL;
	d = NULL;

	g_trace(G_T_TOPOLOGY, "gv_drive_taste(%s, %s)", mp->name, pp->name);
	g_topology_assert();

	/* Find the VINUM class and its associated geom. */
	gp2 = find_vinum_geom();
	if (gp2 == NULL)
		return (NULL);
	sc = gp2->softc;

	gp = g_new_geomf(mp, "%s.vinumdrive", pp->name);
	gp->start = gv_drive_start;
	gp->orphan = gv_drive_orphan;
	gp->access = gv_drive_access;

	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_access(cp, 1, 0, 0);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		return (NULL);
	}

	g_topology_unlock();

	/* Now check if the provided slice is a valid vinum drive. */
	do {
		vhdr = g_read_data(cp, GV_HDR_OFFSET, pp->sectorsize, &error);
		if (vhdr == NULL || error != 0)
			break;
		if (vhdr->magic != GV_MAGIC) {
			g_free(vhdr);
			break;
		}

		/*
		 * We have found a valid vinum drive.  Let's see if it is
		 * already known in the configuration.  There's a chance that
		 * the VINUMDRIVE class tastes before the VINUM class could
		 * taste, so parse the configuration here too, just to be on
		 * the safe side.
		 */
		buf = g_read_data(cp, GV_CFG_OFFSET, GV_CFG_LEN, &error);
		if (buf == NULL || error != 0) {
			g_free(vhdr);
			break;
		}
		g_topology_lock();
		gv_parse_config(sc, buf, 1);
		g_free(buf);

		d = gv_find_drive(sc, vhdr->label.name);

		/* We already know about this drive. */
		if (d != NULL) {
			/* Check if this drive already has a geom. */
			if (d->geom != NULL) {
				g_topology_unlock();
				g_free(vhdr);
				break;
			}
			bcopy(vhdr, d->hdr, sizeof(*vhdr));
			g_free(vhdr);

		/* This is a new drive. */
		} else {
			d = g_malloc(sizeof(*d), M_WAITOK | M_ZERO);

			/* Initialize all needed variables. */
			d->size = pp->mediasize - GV_DATA_START;
			d->avail = d->size;
			d->hdr = vhdr;
			strncpy(d->name, vhdr->label.name, GV_MAXDRIVENAME);
			LIST_INIT(&d->subdisks);
			LIST_INIT(&d->freelist);

			/* We also need a freelist entry. */
			fl = g_malloc(sizeof(*fl), M_WAITOK | M_ZERO);
			fl->offset = GV_DATA_START;
			fl->size = d->avail;
			LIST_INSERT_HEAD(&d->freelist, fl, freelist);
			d->freelist_entries = 1;

			TAILQ_INIT(&d->bqueue);

			/* Save it into the main configuration. */
			LIST_INSERT_HEAD(&sc->drives, d, drive);
		}

		/*
		 * Create a bio queue mutex and a worker thread, if necessary.
		 */
		if (mtx_initialized(&d->bqueue_mtx) == 0)
			mtx_init(&d->bqueue_mtx, "gv_drive", NULL, MTX_DEF);

		if (!(d->flags & GV_DRIVE_THREAD_ACTIVE)) {
			kthread_create(gv_drive_worker, d, NULL, 0, 0,
			    "gv_d %s", d->name);
			d->flags |= GV_DRIVE_THREAD_ACTIVE;
		}

		g_access(cp, -1, 0, 0);

		gp->softc = d;
		d->geom = gp;
		d->vinumconf = sc;
		strncpy(d->device, pp->name, GV_MAXDRIVENAME);

		/*
		 * Find out which subdisks belong to this drive and crosslink
		 * them.
		 */
		LIST_FOREACH(s, &sc->subdisks, sd) {
			if (!strncmp(s->drive, d->name, GV_MAXDRIVENAME))
				/* XXX: errors ignored */
				gv_sd_to_drive(sc, d, s, errstr,
				    sizeof(errstr));
		}

		/* This drive is now up for sure. */
		gv_set_drive_state(d, GV_DRIVE_UP, 0);

		/*
		 * If there are subdisks on this drive, we need to create
		 * providers for them.
		 */
		if (d->sdcount)
			gv_drive_modify(d);

		return (gp);

	} while (0);

	g_topology_lock();
	g_access(cp, -1, 0, 0);

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	return (NULL);
}

/*
 * Modify the providers for the given drive 'd'.  It is assumed that the
 * subdisk list of 'd' is already correctly set up.
 */
void
gv_drive_modify(struct gv_drive *d)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp, *pp2;
	struct gv_sd *s;

	KASSERT(d != NULL, ("gv_drive_modify: null d"));
	gp = d->geom;
	KASSERT(gp != NULL, ("gv_drive_modify: null gp"));
	cp = LIST_FIRST(&gp->consumer);
	KASSERT(cp != NULL, ("gv_drive_modify: null cp"));
	pp = cp->provider;
	KASSERT(pp != NULL, ("gv_drive_modify: null pp"));

	g_topology_assert();

	LIST_FOREACH(s, &d->subdisks, from_drive) {
		/* This subdisk already has a provider. */
		if (s->provider != NULL)
			continue;
		pp2 = g_new_providerf(gp, "gvinum/sd/%s", s->name);
		pp2->mediasize = s->size;
		pp2->sectorsize = pp->sectorsize;
		g_error_provider(pp2, 0);
		s->provider = pp2;
		pp2->private = s;
	}
}

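/* Stop the drive's worker thread and wither the geom. */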
static int
gv_drive_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp)
{
	struct gv_drive *d;

	g_trace(G_T_TOPOLOGY, "gv_drive_destroy_geom: %s", gp->name);
	g_topology_assert();

	d = gp->softc;
	gv_kill_drive_thread(d);

	g_wither_geom(gp, ENXIO);
	return (0);
}

#define	VINUMDRIVE_CLASS_NAME "VINUMDRIVE"

static struct g_class g_vinum_drive_class = {
	.name = VINUMDRIVE_CLASS_NAME,
	.version = G_VERSION,
	.taste = gv_drive_taste,
	.destroy_geom = gv_drive_destroy_geom
};

DECLARE_GEOM_CLASS(g_vinum_drive_class, g_vinum_drive);