--- vfs_subr.c	1996/07/30 18:00:25	1.57
+++ vfs_subr.c	1996/08/15 06:45:01	1.58

 /*
  * Copyright (c) 1989, 1993
  *	The Regents of the University of California.  All rights reserved.
  * (c) UNIX System Laboratories, Inc.
  * All or some portions of this file are derived from material licensed
  * to the University of California by American Telephone and Telegraph
  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
  * the permission of UNIX System Laboratories, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed by the University of
  *	California, Berkeley and its contributors.
  * 4. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
- *	$Id: vfs_subr.c,v 1.57 1996/07/30 18:00:25 bde Exp $
+ *	$Id: vfs_subr.c,v 1.58 1996/08/15 06:45:01 dyson Exp $
  */

 /*
  * External virtual filesystem routines
  */
 #include "opt_ddb.h"

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/file.h>
 #include <sys/proc.h>
 #include <sys/mount.h>
 #include <sys/time.h>
 #include <sys/vnode.h>
 #include <sys/stat.h>
 #include <sys/namei.h>
 #include <sys/ucred.h>
 #include <sys/buf.h>
 #include <sys/errno.h>
 #include <sys/malloc.h>
 #include <sys/domain.h>
 #include <sys/mbuf.h>

 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/vm_object.h>
 #include <vm/vm_extern.h>
+#include <vm/vm_pager.h>
+#include <vm/vnode_pager.h>
 #include <sys/sysctl.h>

 #include <miscfs/specfs/specdev.h>

 #ifdef DDB
 extern void printlockedvnodes __P((void));
 #endif
 extern void vclean __P((struct vnode *vp, int flags));
 extern void vfs_unmountroot __P((struct mount *rootfs));

 enum vtype iftovt_tab[16] = {
 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
 };
 int vttoif_tab[9] = {
 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
 	S_IFSOCK, S_IFIFO, S_IFMT,
 };

 /*
  * Insq/Remq for the vnode usage lists.
  */
 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
 #define	bufremvn(bp) {	\
 	LIST_REMOVE(bp, b_vnbufs); \
 	(bp)->b_vnbufs.le_next = NOLIST; \
 }

 TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
 static u_long freevnodes = 0;

 struct mntlist mountlist;	/* mounted filesystem list */

 int desiredvnodes;
 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RD, &desiredvnodes, 0, "");

 static void	vfs_free_addrlist __P((struct netexport *nep));
 static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
 static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
 				       struct export_args *argp));

 /*
  * Initialize the vnode management data structures.
  */
 void
 vntblinit()
 {
 	desiredvnodes = maxproc + vm_object_cache_max + extravnodes;

 	TAILQ_INIT(&vnode_free_list);
 	CIRCLEQ_INIT(&mountlist);
 }

 /*
  * Lock a filesystem.
  * Used to prevent access to it while mounting and unmounting.
  */
 int
 vfs_lock(mp)
 	register struct mount *mp;
 {

 	while (mp->mnt_flag & MNT_MLOCK) {
 		mp->mnt_flag |= MNT_MWAIT;
 		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
 	}
 	mp->mnt_flag |= MNT_MLOCK;
 	return (0);
 }

 /*
  * Unlock a locked filesystem.
  * Panic if filesystem is not locked.
  */
 void
 vfs_unlock(mp)
 	register struct mount *mp;
 {

 	if ((mp->mnt_flag & MNT_MLOCK) == 0)
 		panic("vfs_unlock: not locked");
 	mp->mnt_flag &= ~MNT_MLOCK;
 	if (mp->mnt_flag & MNT_MWAIT) {
 		mp->mnt_flag &= ~MNT_MWAIT;
 		wakeup((caddr_t) mp);
 	}
 }

 /*
  * Mark a mount point as busy.
  * Used to synchronize access and to delay unmounting.
  */
 int
 vfs_busy(mp)
 	register struct mount *mp;
 {

 	while (mp->mnt_flag & MNT_MPBUSY) {
 		mp->mnt_flag |= MNT_MPWANT;
 		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
 	}
 	if (mp->mnt_flag & MNT_UNMOUNT)
 		return (1);
 	mp->mnt_flag |= MNT_MPBUSY;
 	return (0);
 }

 /*
  * Free a busy filesystem.
  * Panic if filesystem is not busy.
  */
 void
 vfs_unbusy(mp)
 	register struct mount *mp;
 {

 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
 		panic("vfs_unbusy: not busy");
 	mp->mnt_flag &= ~MNT_MPBUSY;
 	if (mp->mnt_flag & MNT_MPWANT) {
 		mp->mnt_flag &= ~MNT_MPWANT;
 		wakeup((caddr_t) &mp->mnt_flag);
 	}
 }

 void
 vfs_unmountroot(struct mount *rootfs)
 {
 	struct mount *mp = rootfs;
 	int error;

 	if (vfs_busy(mp)) {
 		printf("failed to unmount root\n");
 		return;
 	}
 	mp->mnt_flag |= MNT_UNMOUNT;
 	if ((error = vfs_lock(mp))) {
 		printf("lock of root filesystem failed (%d)\n", error);
 		return;
 	}
 	vnode_pager_umount(mp);	/* release cached vnodes */
 	cache_purgevfs(mp);	/* remove cache entries for this file sys */

 	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
 		printf("sync of root filesystem failed (%d)\n", error);

 	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
 		printf("unmount of root filesystem failed (");
 		if (error == EBUSY)
 			printf("BUSY)\n");
 		else
 			printf("%d)\n", error);
 	}
 	mp->mnt_flag &= ~MNT_UNMOUNT;
 	vfs_unbusy(mp);
 }

 /*
  * Unmount all filesystems.  Should only be called by halt().
  */
 void
 vfs_unmountall()
 {
 	struct mount *mp, *nmp, *rootfs = NULL;
 	int error;

 	/* unmount all but rootfs */
 	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
 		nmp = mp->mnt_list.cqe_prev;

 		if (mp->mnt_flag & MNT_ROOTFS) {
 			rootfs = mp;
 			continue;
 		}
 		error = dounmount(mp, MNT_FORCE, initproc);
 		if (error) {
 			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
 			if (error == EBUSY)
 				printf("BUSY)\n");
 			else
 				printf("%d)\n", error);
 		}
 	}

 	/* and finally... */
 	if (rootfs) {
 		vfs_unmountroot(rootfs);
 	} else {
 		printf("no root filesystem\n");
 	}
 }

 /*
  * Lookup a mount point by filesystem identifier.
  */
 struct mount *
 getvfs(fsid)
 	fsid_t *fsid;
 {
 	register struct mount *mp;

 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
 	    mp = mp->mnt_list.cqe_next) {
 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
 			return (mp);
 	}
 	return ((struct mount *) 0);
 }

 /*
  * Get a new unique fsid
  */
 void
 getnewfsid(mp, mtype)
 	struct mount *mp;
 	int mtype;
 {
 	static u_short xxxfs_mntid;

 	fsid_t tfsid;

 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
 	mp->mnt_stat.f_fsid.val[1] = mtype;
 	if (xxxfs_mntid == 0)
 		++xxxfs_mntid;
 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
 	tfsid.val[1] = mtype;
 	if (mountlist.cqh_first != (void *)&mountlist) {
 		while (getvfs(&tfsid)) {
 			tfsid.val[0]++;
 			xxxfs_mntid++;
 		}
 	}
 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
 }

 /*
  * Set vnode attributes to VNOVAL
  */
 void
 vattr_null(vap)
 	register struct vattr *vap;
 {

 	vap->va_type = VNON;
 	vap->va_size = VNOVAL;
 	vap->va_bytes = VNOVAL;
 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
 	    vap->va_fsid = vap->va_fileid =
 	    vap->va_blocksize = vap->va_rdev =
 	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
 	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
 	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
 	    vap->va_flags = vap->va_gen = VNOVAL;
 	vap->va_vaflags = 0;
 }

 /*
  * Routines having to do with the management of the vnode table.
  */
 extern vop_t **dead_vnodeop_p;

 /*
  * Return the next vnode from the free list.
  */
 int
 getnewvnode(tag, mp, vops, vpp)
 	enum vtagtype tag;
 	struct mount *mp;
 	vop_t **vops;
 	struct vnode **vpp;
 {
 	register struct vnode *vp;

 retry:
 	vp = vnode_free_list.tqh_first;
 	/*
 	 * we allocate a new vnode if
 	 * 1. we don't have any free
 	 *	Pretty obvious, we actually used to panic, but that
 	 *	is a silly thing to do.
 	 * 2. we haven't filled our pool yet
 	 *	We don't want to trash the incore (VM-)vnodecache.
 	 * 3. if less than 1/4th of our vnodes are free.
 	 *	We don't want to trash the namei cache either.
 	 */
 	if (freevnodes < (numvnodes >> 2) ||
 	    numvnodes < desiredvnodes ||
 	    vp == NULL) {
 		vp = (struct vnode *) malloc((u_long) sizeof *vp,
 		    M_VNODE, M_WAITOK);
 		bzero((char *) vp, sizeof *vp);
 		numvnodes++;
 	} else {
 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 		if (vp->v_usage > 0) {
 			--vp->v_usage;
 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 			goto retry;
 		}
 		freevnodes--;
 		if (vp->v_usecount)
 			panic("free vnode isn't");

 		/* see comment on why 0xdeadb is set at end of vgone (below) */
 		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
 		vp->v_lease = NULL;
 		if (vp->v_type != VBAD)
 			vgone(vp);

 #ifdef DIAGNOSTIC
 		{
 			int s;

 			if (vp->v_data)
 				panic("cleaned vnode isn't");
 			s = splbio();
 			if (vp->v_numoutput)
 				panic("Clean vnode has pending I/O's");
 			splx(s);
 		}
 #endif
 		vp->v_flag = 0;
 		vp->v_lastr = 0;
 		vp->v_ralen = 0;
 		vp->v_maxra = 0;
 		vp->v_lastw = 0;
 		vp->v_lasta = 0;
 		vp->v_cstart = 0;
 		vp->v_clen = 0;
 		vp->v_socket = 0;
 		vp->v_writecount = 0;	/* XXX */
 		vp->v_usage = 0;
 	}
 	vp->v_type = VNON;
 	cache_purge(vp);
 	vp->v_tag = tag;
 	vp->v_op = vops;
 	insmntque(vp, mp);
 	*vpp = vp;
 	vp->v_usecount = 1;
 	vp->v_data = 0;
 	return (0);
 }

 /*
  * Move a vnode from one mount queue to another.
  */
 void
 insmntque(vp, mp)
 	register struct vnode *vp;
 	register struct mount *mp;
 {

 	/*
 	 * Delete from old mount point vnode list, if on one.
 	 */
 	if (vp->v_mount != NULL)
 		LIST_REMOVE(vp, v_mntvnodes);
 	/*
 	 * Insert into list of vnodes for the new mount point, if available.
 	 */
 	if ((vp->v_mount = mp) == NULL)
 		return;
 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
 }

 /*
  * Update outstanding I/O count and do wakeup if requested.
  */
 void
 vwakeup(bp)
 	register struct buf *bp;
 {
 	register struct vnode *vp;

 	bp->b_flags &= ~B_WRITEINPROG;
 	if ((vp = bp->b_vp)) {
 		vp->v_numoutput--;
 		if (vp->v_numoutput < 0)
 			panic("vwakeup: neg numoutput");
 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
 			vp->v_flag &= ~VBWAIT;
 			wakeup((caddr_t) &vp->v_numoutput);
 		}
 	}
 }

 /*
  * Flush out and invalidate all buffers associated with a vnode.
  * Called with the underlying object locked.
  */
 int
 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
 	register struct vnode *vp;
 	int flags;
 	struct ucred *cred;
 	struct proc *p;
 	int slpflag, slptimeo;
 {
 	register struct buf *bp;
 	struct buf *nbp, *blist;
 	int s, error;
 	vm_object_t object;

 	if (flags & V_SAVE) {
 		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
 			return (error);
 		if (vp->v_dirtyblkhd.lh_first != NULL)
 			panic("vinvalbuf: dirty bufs");
 	}
+
+	s = splbio();
 	for (;;) {
 		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
 			while (blist && blist->b_lblkno < 0)
 				blist = blist->b_vnbufs.le_next;
 		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
 		    (flags & V_SAVEMETA))
 			while (blist && blist->b_lblkno < 0)
 				blist = blist->b_vnbufs.le_next;
 		if (!blist)
 			break;

 		for (bp = blist; bp; bp = nbp) {
 			nbp = bp->b_vnbufs.le_next;
 			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
 				continue;
-			s = splbio();
 			if (bp->b_flags & B_BUSY) {
 				bp->b_flags |= B_WANTED;
 				error = tsleep((caddr_t) bp,
 				    slpflag | (PRIBIO + 1), "vinvalbuf",
 				    slptimeo);
 				splx(s);
 				if (error)
 					return (error);
 				break;
 			}
 			bremfree(bp);
 			bp->b_flags |= B_BUSY;
-			splx(s);
 			/*
 			 * XXX Since there are no node locks for NFS, I
 			 * believe there is a slight chance that a delayed
 			 * write will occur while sleeping just above, so
 			 * check for it.
 			 */
 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
 				(void) VOP_BWRITE(bp);
 				break;
 			}
 			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
 			brelse(bp);
 		}
 	}
+	splx(s);
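
The hunks above restructure the interrupt protection in vinvalbuf(): instead of raising splbio() around each individual buffer, the whole clean/dirty list walk now runs at splbio(), so buffer-completion interrupts cannot rearrange the lists between picking a buffer and removing it from its queue. A minimal user-space analogue of the pattern, with a mutex standing in for splbio()/splx() and every name invented for illustration:

	#include <pthread.h>
	#include <stddef.h>

	struct buf { struct buf *b_next; int b_busy; };

	static pthread_mutex_t biolock = PTHREAD_MUTEX_INITIALIZER;

	/* Sketch: one guard held across the whole walk (the 1.58 shape),
	 * rather than lock/unlock around each buffer (the 1.57 shape). */
	static void
	scan(struct buf *head)
	{
		struct buf *bp, *nbp;

		pthread_mutex_lock(&biolock);	/* kernel: s = splbio(); */
		for (bp = head; bp != NULL; bp = nbp) {
			nbp = bp->b_next;	/* safe: completion is blocked */
			if (bp->b_busy)
				continue;	/* kernel: tsleep() instead */
			/* ... invalidate bp ... */
		}
		pthread_mutex_unlock(&biolock);	/* kernel: splx(s); */
	}

	int
	main(void)
	{
		scan(NULL);
		return (0);
	}

In the kernel version, tsleep() lowers the priority level while the process sleeps and restores it on return, which is why the error path still calls splx(s) explicitly before returning.
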

 	s = splbio();
 	while (vp->v_numoutput > 0) {
 		vp->v_flag |= VBWAIT;
 		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
 	}
 	splx(s);

 	/*
 	 * Destroy the copy in the VM cache, too.
 	 */
 	object = vp->v_object;
 	if (object != NULL) {
 		vm_object_page_remove(object, 0, object->size,
 		    (flags & V_SAVE) ? TRUE : FALSE);
 	}
 	if (!(flags & V_SAVEMETA) &&
 	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
 		panic("vinvalbuf: flush failed");
 	return (0);
 }

 /*
  * Associate a buffer with a vnode.
  */
 void
 bgetvp(vp, bp)
 	register struct vnode *vp;
 	register struct buf *bp;
 {
 	int s;

 	if (bp->b_vp)
 		panic("bgetvp: not free");
 	VHOLD(vp);
 	bp->b_vp = vp;
 	if (vp->v_type == VBLK || vp->v_type == VCHR)
 		bp->b_dev = vp->v_rdev;
 	else
 		bp->b_dev = NODEV;
 	/*
 	 * Insert onto list for new vnode.
 	 */
 	s = splbio();
 	bufinsvn(bp, &vp->v_cleanblkhd);
 	splx(s);
 }

 /*
  * Disassociate a buffer from a vnode.
  */
 void
 brelvp(bp)
 	register struct buf *bp;
 {
 	struct vnode *vp;
 	int s;

 	if (bp->b_vp == (struct vnode *) 0)
 		panic("brelvp: NULL");
 	/*
 	 * Delete from old vnode list, if on one.
 	 */
 	s = splbio();
 	if (bp->b_vnbufs.le_next != NOLIST)
 		bufremvn(bp);
 	splx(s);

 	vp = bp->b_vp;
 	bp->b_vp = (struct vnode *) 0;
 	HOLDRELE(vp);
 }

 /*
  * Associate a p-buffer with a vnode.
  */
 void
 pbgetvp(vp, bp)
 	register struct vnode *vp;
 	register struct buf *bp;
 {
 	if (bp->b_vp)
 		panic("pbgetvp: not free");
 	VHOLD(vp);
 	bp->b_vp = vp;
 	if (vp->v_type == VBLK || vp->v_type == VCHR)
 		bp->b_dev = vp->v_rdev;
 	else
 		bp->b_dev = NODEV;
 }

 /*
  * Disassociate a p-buffer from a vnode.
  */
 void
 pbrelvp(bp)
 	register struct buf *bp;
 {
 	struct vnode *vp;

 	if (bp->b_vp == (struct vnode *) 0)
 		panic("pbrelvp: NULL");

 	vp = bp->b_vp;
 	bp->b_vp = (struct vnode *) 0;
 	HOLDRELE(vp);
 }

 /*
  * Reassign a buffer from one vnode to another.
  * Used to assign file specific control information
  * (indirect blocks) to the vnode to which they belong.
  */
 void
 reassignbuf(bp, newvp)
 	register struct buf *bp;
 	register struct vnode *newvp;
 {
-	register struct buflists *listheadp;
 	int s;

 	if (newvp == NULL) {
 		printf("reassignbuf: NULL");
 		return;
 	}

 	s = splbio();
 	/*
 	 * Delete from old vnode list, if on one.
 	 */
 	if (bp->b_vnbufs.le_next != NOLIST)
 		bufremvn(bp);
 	/*
 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
 	 * of clean buffers.
 	 */
 	if (bp->b_flags & B_DELWRI) {
 		struct buf *tbp;

 		tbp = newvp->v_dirtyblkhd.lh_first;
 		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
 			bufinsvn(bp, &newvp->v_dirtyblkhd);
 		} else {
 			while (tbp->b_vnbufs.le_next &&
 			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
 				tbp = tbp->b_vnbufs.le_next;
 			}
 			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
 		}
 	} else {
-		listheadp = &newvp->v_cleanblkhd;
-		bufinsvn(bp, listheadp);
+		bufinsvn(bp, &newvp->v_cleanblkhd);
 	}
 	splx(s);
 }

 #ifndef DEVFS_ROOT
 /*
  * Create a vnode for a block device.
  * Used for root filesystem, argdev, and swap areas.
  * Also used for memory file system special devices.
  */
 int
 bdevvp(dev, vpp)
 	dev_t dev;
 	struct vnode **vpp;
 {
 	register struct vnode *vp;
 	struct vnode *nvp;
 	int error;

 	if (dev == NODEV)
 		return (0);
 	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
 	if (error) {
 		*vpp = 0;
 		return (error);
 	}
 	vp = nvp;
 	vp->v_type = VBLK;
 	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
 		vput(vp);
 		vp = nvp;
 	}
 	*vpp = vp;
 	return (0);
 }
 #endif /* !DEVFS_ROOT */

 /*
  * Check to see if the new vnode represents a special device
  * for which we already have a vnode (either because of
  * bdevvp() or because of a different vnode representing
  * the same block device).  If such an alias exists, deallocate
  * the existing contents and return the aliased vnode.  The
  * caller is responsible for filling it with its new contents.
  */
 struct vnode *
 checkalias(nvp, nvp_rdev, mp)
 	register struct vnode *nvp;
 	dev_t nvp_rdev;
 	struct mount *mp;
 {
 	register struct vnode *vp;
 	struct vnode **vpp;

 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
 		return (NULLVP);

 	vpp = &speclisth[SPECHASH(nvp_rdev)];
 loop:
 	for (vp = *vpp; vp; vp = vp->v_specnext) {
 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
 			continue;
 		/*
 		 * Alias, but not in use, so flush it out.
 		 */
 		if (vp->v_usecount == 0) {
 			vgone(vp);
 			goto loop;
 		}
 		if (vget(vp, 1))
 			goto loop;
 		break;
 	}
+
 	if (vp == NULL || vp->v_tag != VT_NON) {
 		MALLOC(nvp->v_specinfo, struct specinfo *,
 		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
 		nvp->v_rdev = nvp_rdev;
 		nvp->v_hashchain = vpp;
 		nvp->v_specnext = *vpp;
 		nvp->v_specflags = 0;
 		*vpp = nvp;
 		if (vp != NULL) {
 			nvp->v_flag |= VALIASED;
 			vp->v_flag |= VALIASED;
 			vput(vp);
 		}
 		return (NULLVP);
 	}
 	VOP_UNLOCK(vp);
 	vclean(vp, 0);
 	vp->v_op = nvp->v_op;
 	vp->v_tag = nvp->v_tag;
 	nvp->v_type = VNON;
 	insmntque(vp, mp);
 	return (vp);
 }

 /*
  * Grab a particular vnode from the free list, increment its
  * reference count and lock it.  The vnode lock bit is set while
  * the vnode is being eliminated in vgone.  The process is awakened
  * when the transition is completed, and an error returned to
  * indicate that the vnode is no longer usable (possibly having
  * been changed to a new file system type).
  */
 int
 vget(vp, lockflag)
 	register struct vnode *vp;
 	int lockflag;
 {

 	/*
 	 * If the vnode is in the process of being cleaned out for another
 	 * use, we wait for the cleaning to finish and then return failure.
 	 * Cleaning is determined either by checking that the VXLOCK flag is
 	 * set, or that the use count is zero with the back pointer set to
 	 * show that it has been removed from the free list by getnewvnode.
 	 * The VXLOCK flag may not have been set yet because vclean is blocked
 	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
 	 */
 	if ((vp->v_flag & VXLOCK) ||
 	    (vp->v_usecount == 0 &&
 		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
 		vp->v_flag |= VXWANT;
 		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
 		return (1);
 	}
 	if (vp->v_usecount == 0) {
 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 		freevnodes--;
 	}
 	vp->v_usecount++;
+
+	/*
+	 * Create the VM object, if needed
+	 */
+	if ((vp->v_type == VREG) &&
+	    ((vp->v_object == NULL) ||
+	     (vp->v_object->flags & OBJ_VFS_REF) == 0)) {
+		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
+	}
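
This hunk has vget() ensure that every regular-file vnode it hands out is backed by a VM object tagged OBJ_VFS_REF, i.e. an object on which the VFS layer itself holds a reference; vfs_object_create() runs only when the object is missing or not yet tagged. A toy, compilable model of that create-once guard (OBJ_VFS_REF is the real flag name; its value and everything else below are invented stand-ins):

	#include <stdio.h>
	#include <stdlib.h>

	#define OBJ_VFS_REF	0x1	/* real flag; value invented here */

	enum vtype { VNON, VREG };
	struct object { int flags; };
	struct vnode  { enum vtype v_type; struct object *v_object; };

	/* Stand-in for vfs_object_create(): build the backing object if
	 * needed and record that the VFS now holds a reference on it. */
	static void
	vfs_object_create(struct vnode *vp)
	{
		if (vp->v_object == NULL)
			vp->v_object = calloc(1, sizeof(*vp->v_object));
		vp->v_object->flags |= OBJ_VFS_REF;
		printf("object attached\n");
	}

	static void
	vget(struct vnode *vp)
	{
		if (vp->v_type == VREG &&
		    (vp->v_object == NULL ||
		     (vp->v_object->flags & OBJ_VFS_REF) == 0))
			vfs_object_create(vp);
	}

	int
	main(void)
	{
		struct vnode vn = { VREG, NULL };

		vget(&vn);	/* first grab: creates and tags the object */
		vget(&vn);	/* later grabs: the guard makes this a no-op */
		return (0);
	}

The same guard reappears in vref() below; the flag evidently doubles as the "already created" marker, so the object is only ever charged a single VFS reference.
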
 	if (lockflag)
 		VOP_LOCK(vp);
+
 	return (0);
 }

 /*
  * Vnode reference, just increment the count
  */
 void
 vref(vp)
 	struct vnode *vp;
 {
-
 	if (vp->v_usecount <= 0)
 		panic("vref used where vget required");
+
+	if ((vp->v_type == VREG) &&
+	    ((vp->v_object == NULL) ||
+	     ((vp->v_object->flags & OBJ_VFS_REF) == 0)) ) {
+		/*
+		 * We need to lock the VP during the time that
+		 * the object is created.  This is necessary to
+		 * keep the system from re-entrantly doing it
+		 * multiple times.
+		 */
+		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
+	}
+
 	vp->v_usecount++;
 }

 /*
  * vput(), just unlock and vrele()
  */
 void
 vput(vp)
 	register struct vnode *vp;
 {
-
 	VOP_UNLOCK(vp);
 	vrele(vp);
 }

 /*
  * Vnode release.
  * If count drops to zero, call inactive routine and return to freelist.
  */
 void
 vrele(vp)
 	register struct vnode *vp;
 {

 #ifdef DIAGNOSTIC
 	if (vp == NULL)
 		panic("vrele: null vp");
 #endif
+
 	vp->v_usecount--;
+
+	if ((vp->v_usecount == 1) &&
+	    vp->v_object &&
+	    (vp->v_object->flags & OBJ_VFS_REF)) {
+		vp->v_object->flags &= ~OBJ_VFS_REF;
+		vm_object_deallocate(vp->v_object);
+		return;
+	}
+
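
Taken together with the vget()/vref() hunks, this gives vrele() a hand-off: when the count would fall to the single reference that, on this reading, the pager holds on the vnode, the VFS reference on the VM object is surrendered instead of inactivating the vnode, and the final vrele() arrives later once the object is really torn down. A compilable toy of that circular-reference unwind, every name below an invented stand-in for the kernel structures:

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define OBJ_VFS_REF	0x1

	struct vnode;
	struct object { int flags; int ref_count; struct vnode *vp; };
	struct vnode  { int v_usecount; struct object *v_object; };

	static void vrele(struct vnode *);

	/* Stand-in for vm_object_deallocate(): on the last reference the
	 * pager lets go of its vnode reference, triggering the final
	 * vrele(). */
	static void
	object_deallocate(struct object *obj)
	{
		if (--obj->ref_count == 0) {
			struct vnode *vp = obj->vp;

			vp->v_object = NULL;
			free(obj);
			vrele(vp);
		}
	}

	static void
	vrele(struct vnode *vp)
	{
		vp->v_usecount--;
		/* rev 1.58: hand the last user reference to the object */
		if (vp->v_usecount == 1 && vp->v_object != NULL &&
		    (vp->v_object->flags & OBJ_VFS_REF)) {
			vp->v_object->flags &= ~OBJ_VFS_REF;
			object_deallocate(vp->v_object);
			return;
		}
		if (vp->v_usecount > 0)
			return;
		printf("VOP_INACTIVE\n");	/* vnode goes to the freelist */
	}

	int
	main(void)
	{
		struct vnode vn = { 2, NULL };	/* one user + one pager ref */
		struct object *obj = calloc(1, sizeof(*obj));

		obj->flags = OBJ_VFS_REF;
		obj->ref_count = 1;		/* the VFS reference */
		obj->vp = &vn;
		vn.v_object = obj;

		vrele(&vn);		/* user ref: unwinds everything */
		assert(vn.v_usecount == 0 && vn.v_object == NULL);
		return (0);
	}

The real kernel sequence differs in detail (the pager teardown goes through vnode_pager_dealloc()), but the refcount arithmetic is the same.
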
 	if (vp->v_usecount > 0)
 		return;
-	if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
+
+	if (vp->v_usecount < 0) {
 #ifdef DIAGNOSTIC
 		vprint("vrele: negative ref count", vp);
 #endif
 		panic("vrele: negative reference cnt");
 	}
 	if (vp->v_flag & VAGE) {
 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 		vp->v_flag &= ~VAGE;
 		vp->v_usage = 0;
 	} else {
 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 	}
 	freevnodes++;

 	VOP_INACTIVE(vp);
 }

 #ifdef DIAGNOSTIC
 /*
  * Page or buffer structure gets a reference.
  */
 void
 vhold(vp)
 	register struct vnode *vp;
 {

 	vp->v_holdcnt++;
 }

 /*
  * Page or buffer structure frees a reference.
  */
 void
 holdrele(vp)
 	register struct vnode *vp;
 {

 	if (vp->v_holdcnt <= 0)
 		panic("holdrele: holdcnt");
 	vp->v_holdcnt--;
 }
 #endif /* DIAGNOSTIC */

 /*
  * Remove any vnodes in the vnode table belonging to mount point mp.
  *
  * If MNT_NOFORCE is specified, there should not be any active ones,
  * return error if any are found (nb: this is a user error, not a
  * system error). If MNT_FORCE is specified, detach any active vnodes
  * that are found.
  */
 #ifdef DIAGNOSTIC
 static int busyprt = 0;	/* print out busy vnodes */
 SYSCTL_INT(_debug, 1, busyprt, CTLFLAG_RW, &busyprt, 0, "");
 #endif

 int
 vflush(mp, skipvp, flags)
 	struct mount *mp;
 	struct vnode *skipvp;
 	int flags;
 {
 	register struct vnode *vp, *nvp;
 	int busy = 0;

 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
 		panic("vflush: not busy");
 loop:
 	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
 		/*
 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
 		 * Start over if it has (it won't be on the list anymore).
 		 */
 		if (vp->v_mount != mp)
 			goto loop;
 		nvp = vp->v_mntvnodes.le_next;
 		/*
 		 * Skip over a selected vnode.
 		 */
 		if (vp == skipvp)
 			continue;
 		/*
 		 * Skip over vnodes marked VSYSTEM.
 		 */
 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
 			continue;
 		/*
 		 * If WRITECLOSE is set, only flush out regular file vnodes
 		 * open for writing.
 		 */
 		if ((flags & WRITECLOSE) &&
 		    (vp->v_writecount == 0 || vp->v_type != VREG))
 			continue;
+
+		if ((vp->v_usecount == 1) && vp->v_object) {
+			pager_cache(vp->v_object, FALSE);
+		}
+
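
The vflush() hunk handles vnodes whose only remaining use is their VM object: before the busy test below, pager_cache(object, FALSE) asks the pager not to let the object persist in the cache, the apparent intent being that the object's reference then unwinds (as in vrele() above), so an otherwise-idle vnode reaches v_usecount == 0 in time for the vgone() path rather than being reported busy at unmount. A toy flush loop modelling that outcome only, not the pager mechanics; all names invented:

	#include <stdio.h>

	struct vnode { int v_usecount; int has_cached_object; };

	/* Model: a vnode referenced only by its cached VM object is
	 * uncached (kernel: pager_cache(vp->v_object, FALSE)) so its
	 * last reference unwinds before the busy test. */
	static int
	vflush(struct vnode *v, int n)
	{
		int busy = 0, i;

		for (i = 0; i < n; i++) {
			if (v[i].v_usecount == 1 && v[i].has_cached_object) {
				v[i].has_cached_object = 0;
				v[i].v_usecount = 0;	/* ref unwinds */
			}
			if (v[i].v_usecount == 0)
				continue;		/* kernel: vgone() */
			busy++;
		}
		return (busy);	/* kernel: nonzero => EBUSY */
	}

	int
	main(void)
	{
		struct vnode v[2] = { { 1, 1 }, { 2, 0 } };

		printf("busy: %d\n", vflush(v, 2));	/* prints 1 */
		return (0);
	}
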
 		/*
 		 * With v_usecount == 0, all we need to do is clear out the
 		 * vnode data structures and we are done.
 		 */
 		if (vp->v_usecount == 0) {
 			vgone(vp);
 			continue;
 		}
 		/*
 		 * If FORCECLOSE is set, forcibly close the vnode. For block
 		 * or character devices, revert to an anonymous device. For
 		 * all other files, just kill them.
 		 */
 		if (flags & FORCECLOSE) {
 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
 				vgone(vp);
 			} else {
 				vclean(vp, 0);
 				vp->v_op = spec_vnodeop_p;
 				insmntque(vp, (struct mount *) 0);
 			}
 			continue;
 		}
 #ifdef DIAGNOSTIC
 		if (busyprt)
 			vprint("vflush: busy vnode", vp);
 #endif
 		busy++;
 	}
 	if (busy)
 		return (EBUSY);
 	return (0);
 }

 /*
  * Disassociate the underlying file system from a vnode.
  */
 void
 vclean(struct vnode *vp, int flags)
 {
 	int active;

 	/*
 	 * Check to see if the vnode is in use. If so we have to reference it
 	 * before we clean it out so that its count cannot fall to zero and
 	 * generate a race against ourselves to recycle it.
 	 */
 	if ((active = vp->v_usecount))
 		VREF(vp);
 	/*
 	 * Even if the count is zero, the VOP_INACTIVE routine may still have
 	 * the object locked while it cleans it out. The VOP_LOCK ensures that
 	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
 	 * it ensures that no other activity can occur while the underlying
 	 * object is being cleaned out.
 	 */
 	VOP_LOCK(vp);
 	/*
 	 * Prevent the vnode from being recycled or brought into use while we
 	 * clean it out.
 	 */
 	if (vp->v_flag & VXLOCK)
 		panic("vclean: deadlock");
 	vp->v_flag |= VXLOCK;
 	/*
 	 * Clean out any buffers associated with the vnode.
 	 */
 	if (flags & DOCLOSE)
 		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
 	/*
 	 * Any other processes trying to obtain this lock must first wait for
 	 * VXLOCK to clear, then call the new lock operation.
 	 */
 	VOP_UNLOCK(vp);
 	/*
 	 * If purging an active vnode, it must be closed and deactivated
 	 * before being reclaimed.
 	 */
 	if (active) {
 		if (flags & DOCLOSE)
 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
 		VOP_INACTIVE(vp);
 	}
 	/*
 	 * Reclaim the vnode.
 	 */
 	if (VOP_RECLAIM(vp))
 		panic("vclean: cannot reclaim");
 	if (active)
 		vrele(vp);

 	/*
 	 * Done with purge, notify sleepers of the grim news.
 	 */
 	vp->v_op = dead_vnodeop_p;
 	vp->v_tag = VT_NON;
 	vp->v_flag &= ~VXLOCK;
 	if (vp->v_flag & VXWANT) {
 		vp->v_flag &= ~VXWANT;
 		wakeup((caddr_t) vp);
 	}
 }

 /*
  * Eliminate all activity associated with the requested vnode
  * and with all vnodes aliased to the requested vnode.
  */
 void
 vgoneall(vp)
 	register struct vnode *vp;
 {
 	register struct vnode *vq;

 	if (vp->v_flag & VALIASED) {
 		/*
 		 * If a vgone (or vclean) is already in progress, wait until
 		 * it is done and return.
 		 */
 		if (vp->v_flag & VXLOCK) {
 			vp->v_flag |= VXWANT;
 			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
 			return;
 		}
 		/*
 		 * Ensure that vp will not be vgone'd while we are eliminating
 		 * its aliases.
 		 */
 		vp->v_flag |= VXLOCK;
 		while (vp->v_flag & VALIASED) {
 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
 				if (vq->v_rdev != vp->v_rdev ||
 				    vq->v_type != vp->v_type || vp == vq)
 					continue;
 				vgone(vq);
 				break;
 			}
 		}
 		/*
 		 * Remove the lock so that vgone below will really eliminate
 		 * the vnode after which time vgone will awaken any sleepers.
 		 */
 		vp->v_flag &= ~VXLOCK;
 	}
 	vgone(vp);
 }

 /*
  * Eliminate all activity associated with a vnode
  * in preparation for reuse.
  */
 void
 vgone(vp)
 	register struct vnode *vp;
 {
 	register struct vnode *vq;
 	struct vnode *vx;

 	/*
 	 * If a vgone (or vclean) is already in progress, wait until it is
 	 * done and return.
 	 */
 	if (vp->v_flag & VXLOCK) {
 		vp->v_flag |= VXWANT;
 		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
 		return;
 	}
 	/*
 	 * Clean out the filesystem specific data.
 	 */
 	vclean(vp, DOCLOSE);
 	/*
 	 * Delete from old mount point vnode list, if on one.
 	 */
 	if (vp->v_mount != NULL) {
 		LIST_REMOVE(vp, v_mntvnodes);
 		vp->v_mount = NULL;
 	}
 	/*
 	 * If special device, remove it from special device alias list.
 	 */
 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
 		if (*vp->v_hashchain == vp) {
 			*vp->v_hashchain = vp->v_specnext;
 		} else {
 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
 				if (vq->v_specnext != vp)
 					continue;
 				vq->v_specnext = vp->v_specnext;
 				break;
 			}
 			if (vq == NULL)
 				panic("missing bdev");
 		}
 		if (vp->v_flag & VALIASED) {
 			vx = NULL;
 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
 				if (vq->v_rdev != vp->v_rdev ||
 				    vq->v_type != vp->v_type)
 					continue;
 				if (vx)
 					break;
 				vx = vq;
 			}
 			if (vx == NULL)
 				panic("missing alias");
 			if (vq == NULL)
 				vx->v_flag &= ~VALIASED;
 			vp->v_flag &= ~VALIASED;
 		}
 		FREE(vp->v_specinfo, M_VNODE);
 		vp->v_specinfo = NULL;
 	}
 	/*
 	 * If it is on the freelist and not already at the head, move it to
 	 * the head of the list. The test of the back pointer and the
 	 * reference count of zero is because it will be removed from the free
 	 * list by getnewvnode, but will not have its reference count
 	 * incremented until after calling vgone. If the reference count were
 	 * incremented first, vgone would (incorrectly) try to close the
 	 * previous instance of the underlying object. So, the back pointer is
 	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
 	 * the freelist to ensure that we do not try to move it here.
 	 */
 	if (vp->v_usecount == 0 &&
 	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
 	    vnode_free_list.tqh_first != vp) {
 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
 	}
 	vp->v_type = VBAD;
 }

 /*
  * Lookup a vnode by device number.
  */
 int
 vfinddev(dev, type, vpp)
 	dev_t dev;
 	enum vtype type;
 	struct vnode **vpp;
 {
 	register struct vnode *vp;

 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
 		if (dev != vp->v_rdev || type != vp->v_type)
 			continue;
 		*vpp = vp;
 		return (1);
 	}
 	return (0);
 }

 /*
  * Calculate the total number of references to a special device.
  */
 int
 vcount(vp)
 	register struct vnode *vp;
 {
 	register struct vnode *vq, *vnext;
 	int count;

 loop:
 	if ((vp->v_flag & VALIASED) == 0)
 		return (vp->v_usecount);
 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
 		vnext = vq->v_specnext;
 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
 			continue;
 		/*
 		 * Alias, but not in use, so flush it out.
 		 */
 		if (vq->v_usecount == 0 && vq != vp) {
 			vgone(vq);
 			goto loop;
 		}
 		count += vq->v_usecount;
 	}
 	return (count);
 }

 /*
  * Print out a description of a vnode.
  */
 static char *typename[] =
 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

 void
 vprint(label, vp)
 	char *label;
 	register struct vnode *vp;
 {
 	char buf[64];

 	if (label != NULL)
 		printf("%s: ", label);
 	printf("type %s, usecount %d, writecount %d, refcount %ld,",
 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
 	    vp->v_holdcnt);
 	buf[0] = '\0';
 	if (vp->v_flag & VROOT)
 		strcat(buf, "|VROOT");
 	if (vp->v_flag & VTEXT)
 		strcat(buf, "|VTEXT");
 	if (vp->v_flag & VSYSTEM)
 		strcat(buf, "|VSYSTEM");
 	if (vp->v_flag & VXLOCK)
 		strcat(buf, "|VXLOCK");
 	if (vp->v_flag & VXWANT)
 		strcat(buf, "|VXWANT");
 	if (vp->v_flag & VBWAIT)
 		strcat(buf, "|VBWAIT");
 	if (vp->v_flag & VALIASED)
 		strcat(buf, "|VALIASED");
 	if (buf[0] != '\0')
 		printf(" flags (%s)", &buf[1]);
 	if (vp->v_data == NULL) {
 		printf("\n");
 	} else {
 		printf("\n\t");
 		VOP_PRINT(vp);
 	}
 }

 #ifdef DDB
 /*
  * List all of the locked vnodes in the system.
  * Called when debugging the kernel.
  */
 void
 printlockedvnodes(void)
 {
 	register struct mount *mp;
 	register struct vnode *vp;

 	printf("Locked vnodes\n");
 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
 	    mp = mp->mnt_list.cqe_next) {
 		for (vp = mp->mnt_vnodelist.lh_first;
 		    vp != NULL;
 		    vp = vp->v_mntvnodes.le_next)
 			if (VOP_ISLOCKED(vp))
 				vprint((char *) 0, vp);
 	}
 }
 #endif

 int kinfo_vdebug = 1;
 int kinfo_vgetfailed;

 #define KINFO_VNODESLOP	10
 /*
  * Dump vnode list (via sysctl).
  * Copyout address of vnode followed by vnode.
  */
 /* ARGSUSED */
 static int
 sysctl_vnode SYSCTL_HANDLER_ARGS
 {
 	register struct mount *mp, *nmp;
 	struct vnode *vp;
 	int error;

 #define VPTRSZ	sizeof (struct vnode *)
 #define VNODESZ	sizeof (struct vnode)

 	req->lock = 0;
 	if (!req->oldptr)	/* Make an estimate */
 		return (SYSCTL_OUT(req, 0,
 		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
 		nmp = mp->mnt_list.cqe_next;
 		if (vfs_busy(mp))
 			continue;
 again:
 		for (vp = mp->mnt_vnodelist.lh_first;
 		    vp != NULL;
 		    vp = vp->v_mntvnodes.le_next) {
 			/*
 			 * Check that the vp is still associated with this
 			 * filesystem.  RACE: could have been recycled onto
 			 * the same filesystem.
#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr)	/* Make an estimate */
		return (SYSCTL_OUT(req, 0,
		    (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				goto again;
			}
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ))) {
				vfs_unbusy(mp);
				return (error);
			}
		}
		vfs_unbusy(mp);
	}

	return (0);
}

SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
	struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		/*
		 * The netmask comes from ex_mask and is stored
		 * immediately after the address.
		 */
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not
		 * used; do so on demand here.
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}
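/*
 * Illustrative sketch (not part of the original source; the sockaddr
 * variables are hypothetical): a userland exports daemon hands the
 * kernel one export_args per network, and the (address, mask) pair
 * lands in the per-AF radix tree via the vfs_export() wrapper below:
 *
 *	struct export_args ea;
 *
 *	bzero(&ea, sizeof(ea));
 *	ea.ex_flags = MNT_EXPORTED | MNT_EXRDONLY;
 *	ea.ex_addr = (struct sockaddr *)&net_sin;	(network address)
 *	ea.ex_addrlen = sizeof(net_sin);
 *	ea.ex_mask = (struct sockaddr *)&mask_sin;	(its netmask)
 *	ea.ex_masklen = sizeof(mask_sin);
 *	error = vfs_export(mp, nep, &ea);
 */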
/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
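/*
 * Illustrative sketch (not part of the original source): an NFS-style
 * server checks an incoming request's source address `nam' against a
 * mount's exports roughly as follows, rejecting the client when no
 * netcred matches:
 *
 *	struct netcred *np;
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);	(host not exported to)
 *	... honor np->netc_exflags and np->netc_anon ...
 */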
/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags) {
	struct vnode *vp, *nvp;
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {

		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
			continue;
		if (vp->v_object &&
		    (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
		}
	}
}
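/*
 * Illustrative note (not part of the original source): a periodic
 * sync-style caller would flush each filesystem with
 *
 *	vfs_msync(mp, MNT_NOWAIT);
 *
 * skipping vnodes that are locked, while an unmount-style caller
 * passes MNT_WAIT so even locked vnodes are examined.
 */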
/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems might
 * afford the additional metadata buffering capability of the
 * VMIO code by making the device node be VMIO mode also.
 */
int
vfs_object_create(vp, p, cred, waslocked)
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	int waslocked;
{
	struct vattr vat;
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			(void) vnode_pager_alloc(vp,
			    OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
		} else {
			/*
			 * This simply allocates the biggest object possible
			 * for a VBLK vnode.  This should be fixed, but
			 * doesn't cause any problems (yet).
			 */
			(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
		}
		vp->v_object->flags |= OBJ_VFS_REF;
	} else {
		if (object->flags & OBJ_DEAD) {
			if (waslocked)
				VOP_UNLOCK(vp);
			tsleep(object, PVM, "vodead", 0);
			if (waslocked)
				VOP_LOCK(vp);
			goto retry;
		}
		if ((object->flags & OBJ_VFS_REF) == 0) {
			object->flags |= OBJ_VFS_REF;
			vm_object_reference(object);
		}
	}
	if (vp->v_object)
		vp->v_flag |= VVMIO;

retn:
	return error;
}
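/*
 * Illustrative sketch (not part of the original source): a filesystem
 * open routine could ensure a VREG vnode has its backing VM object
 * before I/O begins; the caller's lock state is passed through
 * `waslocked' so the OBJ_DEAD sleep above can drop and retake it:
 *
 *	if ((error = vfs_object_create(vp, p, p->p_ucred, 1)) != 0)
 *		return (error);
 */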