geom_dev.c revision 182843
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/geom/geom_dev.c 182843 2008-09-07 13:54:57Z lulf $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

static d_open_t		g_dev_open;
static d_close_t	g_dev_close;
static d_strategy_t	g_dev_strategy;
static d_ioctl_t	g_dev_ioctl;

static struct cdevsw g_dev_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	g_dev_open,
	.d_close =	g_dev_close,
	.d_read =	physread,
	.d_write =	physwrite,
	.d_ioctl =	g_dev_ioctl,
	.d_strategy =	g_dev_strategy,
	.d_name =	"g_dev",
	.d_flags =	D_DISK | D_TRACKCLOSE,
};

static g_taste_t g_dev_taste;
static g_orphan_t g_dev_orphan;
static g_init_t		g_dev_init;

static struct g_class g_dev_class	= {
	.name = "DEV",
	.version = G_VERSION,
	.taste = g_dev_taste,
	.orphan = g_dev_orphan,
	.init = g_dev_init,
};

static struct unrhdr *unithdr;	/* Locked by topology */

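/*
 * Class init method: set up the unit number allocator from which we draw
 * the minor numbers for the /dev entries we create.
 */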
static void
g_dev_init(struct g_class *mp)
{

	unithdr = new_unrhdr(0, INT_MAX, NULL);
}

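/*
 * Print the names of all DEV geoms on the console, space separated, on a
 * single line.
 */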
void
g_dev_print(void)
{
	struct g_geom *gp;
	char const *p = "";

	LIST_FOREACH(gp, &g_dev_class.geom, geom) {
		printf("%s%s", p, gp->name);
		p = " ";
	}
	printf("\n");
}

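/*
 * Return the provider behind a cdev, or NULL if the cdev does not belong
 * to this class.  The topology lock must be held.
 */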
struct g_provider *
g_dev_getprovider(struct cdev *dev)
{
	struct g_consumer *cp;

	g_topology_assert();
	if (dev == NULL)
		return (NULL);
	if (dev->si_devsw != &g_dev_cdevsw)
		return (NULL);
	cp = dev->si_drv2;
	return (cp->provider);
}

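/*
 * Taste method: called by the GEOM core when a new provider appears.
 * Unless this class already has a consumer on the provider, hang a new
 * DEV geom off it, allocate a unit number and create the matching /dev
 * node.
 */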
static struct g_geom *
g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;
	struct cdev *dev;
	u_int unit;

	g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name);
	g_topology_assert();
	LIST_FOREACH(cp, &pp->consumers, consumers)
		if (cp->geom->class == mp)
			return (NULL);
	gp = g_new_geomf(mp, pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	KASSERT(error == 0,
	    ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error));
	unit = alloc_unr(unithdr);
	dev = make_dev(&g_dev_cdevsw, unit2minor(unit),
	    UID_ROOT, GID_OPERATOR, 0640, gp->name);
	if (pp->flags & G_PF_CANDELETE)
		dev->si_flags |= SI_CANDELETE;
	dev->si_iosize_max = MAXPHYS;
	gp->softc = dev;
	dev->si_drv1 = gp;
	dev->si_drv2 = cp;
	return (gp);
}

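/*
 * Open method: translate the open mode into GEOM access count increments
 * and apply them to our consumer under the topology lock.
 */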
static int
g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL || gp->softc != dev)
		return(ENXIO);		/* g_dev_taste() not done yet */

	g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);

	r = flags & FREAD ? 1 : 0;
	w = flags & FWRITE ? 1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? 1 : 0;
#else
	e = 0;
#endif
	if (w) {
		/*
		 * When running in very secure mode, do not allow
		 * opens for writing of any disks.
		 */
		error = securelevel_ge(td->td_ucred, 2);
		if (error)
			return (error);
	}
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	g_topology_unlock();
	return(error);
}

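/*
 * Close method: drop the access counts gained in g_dev_open.  On last
 * close, wait up to ten seconds for outstanding I/O to drain and warn if
 * any is still pending when we give up.
 */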
static int
g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error, r, w, e, i;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	if (gp == NULL || cp == NULL)
		return(ENXIO);
	g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)",
	    gp->name, flags, fmt, td);
	r = flags & FREAD ? -1 : 0;
	w = flags & FWRITE ? -1 : 0;
#ifdef notyet
	e = flags & O_EXCL ? -1 : 0;
#else
	e = 0;
#endif
	g_topology_lock();
	if (dev->si_devsw == NULL)
		error = ENXIO;		/* We were orphaned */
	else
		error = g_access(cp, r, w, e);
	for (i = 0; i < 10 * hz;) {
		if (cp->acr != 0 || cp->acw != 0)
			break;
		if (cp->nstart == cp->nend)
			break;
		pause("gdevwclose", hz / 10);
		i += hz / 10;
	}
	if (cp->acr == 0 && cp->acw == 0 && cp->nstart != cp->nend) {
		printf("WARNING: Final close of geom_dev(%s) %s %s\n",
		    gp->name,
		    "still has outstanding I/O after 10 seconds.",
		    "Completing close anyway, panic may happen later.");
	}
	g_topology_unlock();
	return (error);
}

/*
 * XXX: Until we have unmessed the ioctl situation, there is a race against
 * XXX: a concurrent orphanization.  We cannot close it by holding topology
 * XXX: since that would prevent us from doing our job, and stalling events
 * XXX: will break (actually: stall) the BSD disklabel hacks.
 */
static int
g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_provider *pp;
	struct g_kerneldump kd;
	off_t offset, length, chunk;
	int i, error;
	u_int u;

	gp = dev->si_drv1;
	cp = dev->si_drv2;
	pp = cp->provider;

	error = 0;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_ioctl"));

	i = IOCPARM_LEN(cmd);
	switch (cmd) {
	case DIOCGSECTORSIZE:
		*(u_int *)data = cp->provider->sectorsize;
		if (*(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGMEDIASIZE:
		*(off_t *)data = cp->provider->mediasize;
		if (*(off_t *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWSECTORS:
		error = g_io_getattr("GEOM::fwsectors", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFWHEADS:
		error = g_io_getattr("GEOM::fwheads", cp, &i, data);
		if (error == 0 && *(u_int *)data == 0)
			error = ENOENT;
		break;
	case DIOCGFRONTSTUFF:
		error = g_io_getattr("GEOM::frontstuff", cp, &i, data);
		break;
	case DIOCSKERNELDUMP:
		u = *((u_int *)data);
		if (!u) {
			set_dumper(NULL);
			error = 0;
			break;
		}
		kd.offset = 0;
		kd.length = OFF_MAX;
		i = sizeof kd;
		error = g_io_getattr("GEOM::kerneldump", cp, &i, &kd);
		if (!error)
			dev->si_flags |= SI_DUMPDEV;
		break;
	case DIOCGFLUSH:
		error = g_io_flush(cp);
		break;
	case DIOCGDELETE:
		offset = ((off_t *)data)[0];
		length = ((off_t *)data)[1];
		if ((offset % cp->provider->sectorsize) != 0 ||
		    (length % cp->provider->sectorsize) != 0 || length <= 0) {
			printf("%s: offset=%jd length=%jd\n", __func__, offset,
			    length);
			error = EINVAL;
			break;
		}
		while (length > 0) {
			chunk = length;
			if (chunk > 1024 * cp->provider->sectorsize)
				chunk = 1024 * cp->provider->sectorsize;
			error = g_delete_data(cp, offset, chunk);
			length -= chunk;
			offset += chunk;
			if (error)
				break;
			/*
			 * Since the request size is unbounded, the service
			 * time is likewise.  We make this ioctl interruptible
			 * by checking for signals for each bio.
			 */
			if (SIGPENDING(td))
				break;
		}
		break;
	case DIOCGIDENT:
		error = g_io_getattr("GEOM::ident", cp, &i, data);
		break;
	case DIOCGPROVIDERNAME:
		if (pp == NULL)
			return (ENOENT);
		strlcpy(data, pp->name, i);
		break;

	default:
		if (cp->provider->geom->ioctl != NULL) {
			error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td);
		} else {
			error = ENOIOCTL;
		}
	}

	return (error);
}

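/*
 * Completion callback for the bios cloned in g_dev_strategy: propagate the
 * error and completion status to the parent bio and finish it.
 */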
static void
g_dev_done(struct bio *bp2)
{
	struct bio *bp;

	bp = bp2->bio_parent;
	bp->bio_error = bp2->bio_error;
	if (bp->bio_error != 0) {
		g_trace(G_T_BIO, "g_dev_done(%p) had error %d",
		    bp2, bp->bio_error);
		bp->bio_flags |= BIO_ERROR;
	} else {
		g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd",
		    bp2, bp, bp->bio_resid, (intmax_t)bp2->bio_completed);
	}
	bp->bio_resid = bp->bio_length - bp2->bio_completed;
	bp->bio_completed = bp2->bio_completed;
	g_destroy_bio(bp2);
	biodone(bp);
}

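/*
 * Strategy method: check that the request is properly sector aligned,
 * clone the bio and hand the clone down to our consumer.
 */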
static void
g_dev_strategy(struct bio *bp)
{
	struct g_consumer *cp;
	struct bio *bp2;
	struct cdev *dev;

	KASSERT(bp->bio_cmd == BIO_READ ||
	        bp->bio_cmd == BIO_WRITE ||
	        bp->bio_cmd == BIO_DELETE,
		("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd));
	dev = bp->bio_dev;
	cp = dev->si_drv2;
	KASSERT(cp->acr || cp->acw,
	    ("Consumer with zero access count in g_dev_strategy"));

	if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
	    (bp->bio_bcount % cp->provider->sectorsize) != 0) {
		bp->bio_resid = bp->bio_bcount;
		biofinish(bp, NULL, EINVAL);
		return;
	}

	for (;;) {
		/*
		 * XXX: This is not an ideal solution, but I believe it to
		 * XXX: be deadlock safe, all things considered.
		 */
		bp2 = g_clone_bio(bp);
		if (bp2 != NULL)
			break;
		pause("gdstrat", hz / 10);
	}
	KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place"));
	bp2->bio_done = g_dev_done;
	g_trace(G_T_BIO,
	    "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d",
	    bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length,
	    bp2->bio_data, bp2->bio_cmd);
	g_io_request(bp2, cp);
	KASSERT(cp->acr || cp->acw,
	    ("g_dev_strategy raced with g_dev_close and lost"));
}

/*
 * g_dev_orphan()
 *
 * Called from below when the provider orphaned us.
 * - Clear any dump settings.
 * - Destroy the struct cdev * to prevent any more requests from coming in.
 *   The provider is already marked with an error, so anything which arrives
 *   in the interim will be returned immediately.
 * - Wait for any outstanding I/O to finish.
 * - Set our access counts to zero, whatever they were.
 * - Detach and self-destruct.
 */

static void
g_dev_orphan(struct g_consumer *cp)
{
	struct g_geom *gp;
	struct cdev *dev;
	u_int unit;

	g_topology_assert();
	gp = cp->geom;
	dev = gp->softc;
	g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, gp->name);

	/* Reset any dump-area set on this device */
	if (dev->si_flags & SI_DUMPDEV)
		set_dumper(NULL);

	/* Destroy the struct cdev * so we get no more requests */
	unit = dev2unit(dev);
	destroy_dev(dev);
	free_unr(unithdr, unit);

	/* Wait for the cows to come home */
	while (cp->nstart != cp->nend)
		pause("gdevorphan", hz / 10);

	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);

	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
}

DECLARE_GEOM_CLASS(g_dev_class, g_dev);
