/*	$NetBSD: vnd.c,v 1.28 1996/09/25 02:22:10 christos Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: vn.c 1.13 94/04/02$
 *
 *	@(#)vn.c	8.6 (Berkeley) 4/1/94
 */

/*
 * Vnode disk driver.
 *
 * Block/character interface to a vnode.  Allows one to treat a file
 * as a disk (e.g. build a filesystem in it, mount it, etc.).
 *
 * NOTE 1: This uses the VOP_BMAP/VOP_STRATEGY interface to the vnode
 * instead of a simple VOP_RDWR.  We do this to avoid distorting the
 * local buffer cache.
 *
 * NOTE 2: There is a security issue involved with this driver.
 * Once mounted, all access to the contents of the "mapped" file via
 * the special file is controlled by the permissions on the special
 * file; the protection of the mapped file is ignored (effectively,
 * by using root credentials in all transactions).
 *
 * NOTE 3: Doesn't interact with leases; should it?
 */
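
/*
 * Illustrative sketch (not part of the driver): a user-level program
 * configures a unit through the VNDIOCSET ioctl declared in
 * <dev/vndioctl.h>, roughly the way a tool such as vnconfig(8) does.
 * The device path below is an assumption and error handling is abbreviated:
 *
 *	struct vnd_ioctl vio;
 *
 *	fd = open("/dev/rvnd0c", O_RDWR);
 *	bzero(&vio, sizeof(vio));
 *	vio.vnd_file = "/var/tmp/image";
 *	if (ioctl(fd, VNDIOCSET, &vio) == -1)
 *		err(1, "VNDIOCSET");
 *
 * Afterwards the block and character vnd devices for that unit behave
 * like an ordinary disk backed by the file.
 */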
63
64#include <sys/param.h>
65#include <sys/systm.h>
66#include <sys/namei.h>
67#include <sys/proc.h>
68#include <sys/errno.h>
69#include <sys/buf.h>
70#include <sys/malloc.h>
71#include <sys/ioctl.h>
72#include <sys/disklabel.h>
73#include <sys/device.h>
74#include <sys/disk.h>
75#include <sys/stat.h>
76#include <sys/mount.h>
77#include <sys/vnode.h>
78#include <sys/file.h>
79#include <sys/uio.h>
80#include <sys/conf.h>
81
82#include <miscfs/specfs/specdev.h>
83
84#include <dev/vndioctl.h>
85
86#ifdef DEBUG
87int dovndcluster = 1;
88int vnddebug = 0x00;
89#define VDB_FOLLOW	0x01
90#define VDB_INIT	0x02
91#define VDB_IO		0x04
92#endif
93
#define b_cylin	b_resid		/* XXX overload b_resid as the disksort() key */

#define	vndunit(x)	DISKUNIT(x)

struct vndbuf {
	struct buf	vb_buf;
	struct buf	*vb_obp;
};

#define	getvndbuf()	\
	((struct vndbuf *)malloc(sizeof(struct vndbuf), M_DEVBUF, M_WAITOK))
#define putvndbuf(vbp)	\
	free((caddr_t)(vbp), M_DEVBUF)

struct vnd_softc {
	int		 sc_flags;	/* flags */
	size_t		 sc_size;	/* size of vnd */
	struct vnode	*sc_vp;		/* vnode */
	struct ucred	*sc_cred;	/* credentials */
	int		 sc_maxactive;	/* max # of active requests */
	struct buf	 sc_tab;	/* transfer queue */
	char		 sc_xname[8];	/* XXX external name */
	struct disk	 sc_dkdev;	/* generic disk device info */
};

/* sc_flags */
#define	VNF_ALIVE	0x01
#define VNF_INITED	0x02
#define VNF_WANTED	0x40
#define VNF_LOCKED	0x80

struct vnd_softc *vnd_softc;
int numvnd = 0;

/* called by main() at boot time */
void	vndattach __P((int));

void	vndclear __P((struct vnd_softc *));
void	vndstart __P((struct vnd_softc *));
int	vndsetcred __P((struct vnd_softc *, struct ucred *));
void	vndthrottle __P((struct vnd_softc *, struct vnode *));
void	vndiodone __P((struct buf *));
void	vndshutdown __P((void));

static	int vndlock __P((struct vnd_softc *));
static	void vndunlock __P((struct vnd_softc *));

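/*
 * Allocate and zero the softc array for the requested number of units.
 * If the allocation fails we simply end up with no vnd units configured.
 */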
void
vndattach(num)
	int num;
{
	char *mem;
	register u_long size;

	if (num <= 0)
		return;
	size = num * sizeof(struct vnd_softc);
	mem = malloc(size, M_DEVBUF, M_NOWAIT);
	if (mem == NULL) {
		printf("WARNING: no memory for vnode disks\n");
		return;
	}
	bzero(mem, size);
	vnd_softc = (struct vnd_softc *)mem;
	numvnd = num;
}

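/*
 * Open a unit.  All we do is record which partitions are open, so the
 * unit cannot be unconfigured from underneath us; there is no real
 * disklabel support yet.
 */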
int
vndopen(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part, pmask;

	/*
	 * XXX Should support disklabels.
	 */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndopen(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif
	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);
	pmask = (1 << part);

	/* Prevent our unit from being unconfigured while open. */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask |= pmask;
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask |= pmask;
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

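/*
 * Close a unit: clear the open bit for this partition in the
 * appropriate (block or character) open mask.
 */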
int
vndclose(dev, flags, mode, p)
	dev_t dev;
	int flags, mode;
	struct proc *p;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;
	int error = 0, part;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclose(%x, %x, %x, %p)\n", dev, flags, mode, p);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((error = vndlock(sc)) != 0)
		return (error);

	part = DISKPART(dev);

	/* ...that much closer to allowing unconfiguration... */
	switch (mode) {
	case S_IFCHR:
		sc->sc_dkdev.dk_copenmask &= ~(1 << part);
		break;

	case S_IFBLK:
		sc->sc_dkdev.dk_bopenmask &= ~(1 << part);
		break;
	}
	sc->sc_dkdev.dk_openmask =
	    sc->sc_dkdev.dk_copenmask | sc->sc_dkdev.dk_bopenmask;

	vndunlock(sc);
	return (0);
}

/*
 * Break the request into bsize pieces and submit using VOP_BMAP/VOP_STRATEGY.
 * Note that this driver can only be used for swapping over NFS on the hp
 * since nfs_strategy on the vax cannot handle u-areas and page tables.
 */
void
vndstrategy(bp)
	register struct buf *bp;
{
	int unit = vndunit(bp->b_dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];
	register struct vndbuf *nbp;
	register int bn, bsize, resid;
	register caddr_t addr;
	int sz, flags, error;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return;
	}
	bn = bp->b_blkno;
	sz = howmany(bp->b_bcount, DEV_BSIZE);
	bp->b_resid = bp->b_bcount;
	if (bn < 0 || bn + sz > vnd->sc_size) {
		if (bn != vnd->sc_size) {
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR;
		}
		biodone(bp);
		return;
	}
	bn = dbtob(bn);
	bsize = vnd->sc_vp->v_mount->mnt_stat.f_iosize;
	addr = bp->b_data;
	flags = bp->b_flags | B_CALL;
	for (resid = bp->b_resid; resid; resid -= sz) {
		struct vnode *vp;
		daddr_t nbn;
		int off, s, nra;

		nra = 0;
		VOP_LOCK(vnd->sc_vp);
		error = VOP_BMAP(vnd->sc_vp, bn / bsize, &vp, &nbn, &nra);
		VOP_UNLOCK(vnd->sc_vp);
		if (error == 0 && (long)nbn == -1)
			error = EIO;
#ifdef DEBUG
		if (!dovndcluster)
			nra = 0;
#endif

		if ((off = bn % bsize) != 0)
			sz = bsize - off;
		else
			sz = (1 + nra) * bsize;
		if (resid < sz)
			sz = resid;
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndstrategy: vp %p/%p bn %x/%x sz %x\n",
			       vnd->sc_vp, vp, bn, nbn, sz);
#endif

		nbp = getvndbuf();
		nbp->vb_buf.b_flags = flags;
		nbp->vb_buf.b_bcount = sz;
		nbp->vb_buf.b_bufsize = bp->b_bufsize;
		nbp->vb_buf.b_error = 0;
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			nbp->vb_buf.b_dev = vp->v_rdev;
		else
			nbp->vb_buf.b_dev = NODEV;
		nbp->vb_buf.b_data = addr;
		nbp->vb_buf.b_blkno = nbn + btodb(off);
		nbp->vb_buf.b_proc = bp->b_proc;
		nbp->vb_buf.b_iodone = vndiodone;
		nbp->vb_buf.b_vp = vp;
		nbp->vb_buf.b_rcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_wcred = vnd->sc_cred;	/* XXX crdup? */
		nbp->vb_buf.b_dirtyoff = bp->b_dirtyoff;
		nbp->vb_buf.b_dirtyend = bp->b_dirtyend;
		nbp->vb_buf.b_validoff = bp->b_validoff;
		nbp->vb_buf.b_validend = bp->b_validend;

		/* save a reference to the old buffer */
		nbp->vb_obp = bp;

		/*
		 * If there was an error or a hole in the file...punt.
		 * Note that we deal with this after the nbp allocation.
		 * This ensures that we properly clean up any operations
		 * that we have already fired off.
		 *
		 * XXX we could deal with holes here but it would be
		 * a hassle (in the write case).
		 */
		if (error) {
			nbp->vb_buf.b_error = error;
			nbp->vb_buf.b_flags |= B_ERROR;
			bp->b_resid -= (resid - sz);
			biodone(&nbp->vb_buf);
			return;
		}
		/*
		 * Just sort by block number
		 */
		nbp->vb_buf.b_cylin = nbp->vb_buf.b_blkno;
		s = splbio();
		disksort(&vnd->sc_tab, &nbp->vb_buf);
		if (vnd->sc_tab.b_active < vnd->sc_maxactive) {
			vnd->sc_tab.b_active++;
			vndstart(vnd);
		}
		splx(s);
		bn += sz;
		addr += sz;
	}
}

/*
 * Feed requests sequentially.
 * We do it this way to keep from flooding NFS servers if we are connected
 * to an NFS file.  This places the burden on the client rather than the
 * server.
 */
void
vndstart(vnd)
	register struct vnd_softc *vnd;
{
	register struct buf *bp;

	/*
	 * Dequeue now since lower level strategy routine might
	 * queue using same links
	 */
	bp = vnd->sc_tab.b_actf;
	vnd->sc_tab.b_actf = bp->b_actf;
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndstart(%ld): bp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), bp, bp->b_vp, bp->b_blkno,
		    bp->b_data, bp->b_bcount);
#endif

	/* Instrumentation. */
	disk_busy(&vnd->sc_dkdev);

	if ((bp->b_flags & B_READ) == 0)
		bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);
}

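/*
 * Completion handler for a component buffer, called from biodone()
 * via B_CALL.  Propagate any error and the completed byte count back
 * to the original buffer, free the vndbuf, update the disk statistics,
 * finish the original buffer once all pieces are done, and keep the
 * transfer queue moving.
 */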
void
vndiodone(bp)
	struct buf *bp;
{
	register struct vndbuf *vbp = (struct vndbuf *) bp;
	register struct buf *pbp = vbp->vb_obp;
	register struct vnd_softc *vnd = &vnd_softc[vndunit(pbp->b_dev)];
	int s;

	s = splbio();
#ifdef DEBUG
	if (vnddebug & VDB_IO)
		printf("vndiodone(%ld): vbp %p vp %p blkno %x addr %p cnt %lx\n",
		    (long) (vnd-vnd_softc), vbp, vbp->vb_buf.b_vp,
		    vbp->vb_buf.b_blkno, vbp->vb_buf.b_data,
		    vbp->vb_buf.b_bcount);
#endif

	if (vbp->vb_buf.b_error) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: vbp %p error %d\n", vbp,
			    vbp->vb_buf.b_error);
#endif
		pbp->b_flags |= B_ERROR;
		pbp->b_error = biowait(&vbp->vb_buf);
	}
	pbp->b_resid -= vbp->vb_buf.b_bcount;
	putvndbuf(vbp);
	disk_unbusy(&vnd->sc_dkdev, (pbp->b_bcount - pbp->b_resid));
	if (pbp->b_resid == 0) {
#ifdef DEBUG
		if (vnddebug & VDB_IO)
			printf("vndiodone: pbp %p iodone\n", pbp);
#endif
		biodone(pbp);
	}
	if (vnd->sc_tab.b_actf)
		vndstart(vnd);
	else
		vnd->sc_tab.b_active--;
	splx(s);
}

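/*
 * Raw read: let physio() break the transfer up and feed it to
 * vndstrategy().
 */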
/* ARGSUSED */
int
vndread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndread(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_READ, minphys, uio));
}

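/*
 * Raw write: as with vndread(), hand the transfer to physio() and
 * vndstrategy().
 */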
/* ARGSUSED */
int
vndwrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	int unit = vndunit(dev);
	struct vnd_softc *sc;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndwrite(%x, %p)\n", dev, uio);
#endif

	if (unit >= numvnd)
		return (ENXIO);
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_INITED) == 0)
		return (ENXIO);

	return (physio(vndstrategy, NULL, dev, B_WRITE, minphys, uio));
}

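/*
 * Configure (VNDIOCSET) or unconfigure (VNDIOCCLR) a unit.  Only the
 * superuser may do either.
 */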
/* ARGSUSED */
int
vndioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd;
	struct vnd_ioctl *vio;
	struct vattr vattr;
	struct nameidata nd;
	int error, part, pmask, s;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndioctl(%x, %lx, %p, %x, %p): unit %d\n",
		    dev, cmd, data, flag, p, unit);
#endif
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
	if (unit >= numvnd)
		return (ENXIO);

	vnd = &vnd_softc[unit];
	vio = (struct vnd_ioctl *)data;
	switch (cmd) {

	case VNDIOCSET:
		if (vnd->sc_flags & VNF_INITED)
			return (EBUSY);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Always open for read and write.
		 * This is probably bogus, but it lets vn_open()
		 * weed out directories, sockets, etc. so we don't
		 * have to worry about them.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, vio->vnd_file, p);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			vndunlock(vnd);
			return(error);
		}
		error = VOP_GETATTR(nd.ni_vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(nd.ni_vp);
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		VOP_UNLOCK(nd.ni_vp);
		vnd->sc_vp = nd.ni_vp;
		vnd->sc_size = btodb(vattr.va_size);	/* note truncation */
		if ((error = vndsetcred(vnd, p->p_ucred)) != 0) {
			(void) vn_close(nd.ni_vp, FREAD|FWRITE, p->p_ucred, p);
			vndunlock(vnd);
			return(error);
		}
		vndthrottle(vnd, vnd->sc_vp);
		vio->vnd_size = dbtob(vnd->sc_size);
		vnd->sc_flags |= VNF_INITED;
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: SET vp %p size %lx\n",
			    vnd->sc_vp, (unsigned long) vnd->sc_size);
#endif

		/* Attach the disk. */
		bzero(vnd->sc_xname, sizeof(vnd->sc_xname));	/* XXX */
		sprintf(vnd->sc_xname, "vnd%d", unit);		/* XXX */
		vnd->sc_dkdev.dk_name = vnd->sc_xname;
		disk_attach(&vnd->sc_dkdev);

		vndunlock(vnd);

		break;

	case VNDIOCCLR:
		if ((vnd->sc_flags & VNF_INITED) == 0)
			return (ENXIO);

		if ((error = vndlock(vnd)) != 0)
			return (error);

		/*
		 * Don't unconfigure if any other partitions are open
		 * or if both the character and block flavors of this
		 * partition are open.
		 */
		part = DISKPART(dev);
		pmask = (1 << part);
		if ((vnd->sc_dkdev.dk_openmask & ~pmask) ||
		    ((vnd->sc_dkdev.dk_bopenmask & pmask) &&
		    (vnd->sc_dkdev.dk_copenmask & pmask))) {
			vndunlock(vnd);
			return (EBUSY);
		}

		vndclear(vnd);
#ifdef DEBUG
		if (vnddebug & VDB_INIT)
			printf("vndioctl: CLRed\n");
#endif

		/* Detach the disk. */
		disk_detach(&vnd->sc_dkdev);

		/* This must be atomic. */
		s = splhigh();
		vndunlock(vnd);
		bzero(vnd, sizeof(struct vnd_softc));
		splx(s);

		break;

	/*
	 * XXX Should support disklabels.
	 */

	default:
		return(ENOTTY);
	}

	return (0);
}

/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
int
vndsetcred(vnd, cred)
	register struct vnd_softc *vnd;
	struct ucred *cred;
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = crdup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_resid = aiov.iov_len;
	VOP_LOCK(vnd->sc_vp);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Set maxactive based on FS type
 */
void
vndthrottle(vnd, vp)
	register struct vnd_softc *vnd;
	struct vnode *vp;
{
#ifdef NFSCLIENT
	extern int (**nfsv2_vnodeop_p) __P((void *));

	if (vp->v_op == nfsv2_vnodeop_p)
		vnd->sc_maxactive = 2;
	else
#endif
		vnd->sc_maxactive = 8;

	if (vnd->sc_maxactive < 1)
		vnd->sc_maxactive = 1;
}

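/*
 * Unconfigure every initialized unit, e.g. when the system is shutting
 * down, so that backing vnodes are closed cleanly.
 */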
void
vndshutdown()
{
	register struct vnd_softc *vnd;

	for (vnd = &vnd_softc[0]; vnd < &vnd_softc[numvnd]; vnd++)
		if (vnd->sc_flags & VNF_INITED)
			vndclear(vnd);
}

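/*
 * Tear down a configured unit: close the backing vnode, release the
 * cached credentials and forget the size.
 */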
void
vndclear(vnd)
	register struct vnd_softc *vnd;
{
	register struct vnode *vp = vnd->sc_vp;
	struct proc *p = curproc;		/* XXX */

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndclear(%p): vp %p\n", vnd, vp);
#endif
	vnd->sc_flags &= ~VNF_INITED;
	if (vp == (struct vnode *)0)
		panic("vndclear: null vp");
	(void) vn_close(vp, FREAD|FWRITE, vnd->sc_cred, p);
	crfree(vnd->sc_cred);
	vnd->sc_vp = (struct vnode *)0;
	vnd->sc_cred = (struct ucred *)0;
	vnd->sc_size = 0;
}

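/*
 * Return the size of the unit in DEV_BSIZE sectors, or -1 if the unit
 * does not exist or is not configured.
 */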
int
vndsize(dev)
	dev_t dev;
{
	int unit = vndunit(dev);
	register struct vnd_softc *vnd = &vnd_softc[unit];

	if (unit >= numvnd || (vnd->sc_flags & VNF_INITED) == 0)
		return(-1);
	return(vnd->sc_size);
}

int
vnddump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{

	/* Not implemented. */
	return ENXIO;
}

/*
 * Wait interruptibly for an exclusive lock.
 *
 * XXX
 * Several drivers do this; it should be abstracted and made MP-safe.
 */
static int
vndlock(sc)
	struct vnd_softc *sc;
{
	int error;

	while ((sc->sc_flags & VNF_LOCKED) != 0) {
		sc->sc_flags |= VNF_WANTED;
		if ((error = tsleep(sc, PRIBIO | PCATCH, "vndlck", 0)) != 0)
			return (error);
	}
	sc->sc_flags |= VNF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
static void
vndunlock(sc)
	struct vnd_softc *sc;
{

	sc->sc_flags &= ~VNF_LOCKED;
	if ((sc->sc_flags & VNF_WANTED) != 0) {
		sc->sc_flags &= ~VNF_WANTED;
		wakeup(sc);
	}
}
778