/* ffs_vfsops.c revision 37520 */
1/* 2 * Copyright (c) 1989, 1991, 1993, 1994 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 * 33 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 34 * $Id: ffs_vfsops.c,v 1.83 1998/06/04 17:21:39 dfr Exp $ 35 */ 36 37#include "opt_devfs.h" /* for SLICE */ 38#include "opt_quota.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/namei.h> 43#include <sys/proc.h> 44#include <sys/kernel.h> 45#include <sys/vnode.h> 46#include <sys/mount.h> 47#include <sys/buf.h> 48#include <sys/conf.h> 49#include <sys/fcntl.h> 50#include <sys/disklabel.h> 51#include <sys/malloc.h> 52 53#include <miscfs/specfs/specdev.h> 54 55#include <ufs/ufs/quota.h> 56#include <ufs/ufs/ufsmount.h> 57#include <ufs/ufs/inode.h> 58#include <ufs/ufs/ufs_extern.h> 59 60#include <ufs/ffs/fs.h> 61#include <ufs/ffs/ffs_extern.h> 62 63#include <vm/vm.h> 64#include <vm/vm_prot.h> 65#include <vm/vm_page.h> 66#include <vm/vm_extern.h> 67#include <vm/vm_object.h> 68 69static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part"); 70 71static int ffs_sbupdate __P((struct ufsmount *, int)); 72static int ffs_reload __P((struct mount *,struct ucred *,struct proc *)); 73static int ffs_oldfscompat __P((struct fs *)); 74static int ffs_mount __P((struct mount *, char *, caddr_t, 75 struct nameidata *, struct proc *)); 76static int ffs_init __P((struct vfsconf *)); 77 78static struct vfsops ufs_vfsops = { 79 ffs_mount, 80 ufs_start, 81 ffs_unmount, 82 ufs_root, 83 ufs_quotactl, 84 ffs_statfs, 85 ffs_sync, 86 ffs_vget, 87 ffs_fhtovp, 88 ffs_vptofh, 89 ffs_init, 90}; 91 92VFS_SET(ufs_vfsops, ufs, MOUNT_UFS, 0); 93 94/* 95 * ffs_mount 96 * 97 * Called when mounting local physical media 98 * 99 * PARAMETERS: 100 * mountroot 101 * mp mount point structure 102 * path NULL (flag for root mount!!!) 
 *	data			<unused>
 *	ndp			<unused>
 *	p			process (user credentials check [statfs])
 *
 * mount
 *	mp	mount point structure
 *	path	path to mount point
 *	data	pointer to argument struct in user space
 *	ndp	mount point namei() return (used for
 *		credentials on reload), reused to look
 *		up block device.
 *	p	process (user credentials check)
 *
 * RETURNS:	0	Success
 *		!0	error number (errno.h)
 *
 * LOCK STATE:
 *
 *		ENTRY
 *			mount point is locked
 *		EXIT
 *			mount point is locked
 *
 * NOTES:
 *		A NULL path can be used for a flag since the mount
 *		system call will fail with EFAULT in copyinstr in
 *		namei() if it is a genuine NULL from the user.
 */
#ifdef SLICE
extern struct vnode *root_device_vnode;
#endif
static int
ffs_mount( mp, path, data, ndp, p)
        struct mount		*mp;	/* mount struct pointer*/
        char			*path;	/* path to mount point*/
        caddr_t			data;	/* arguments to FS specific mount*/
        struct nameidata	*ndp;	/* mount point credentials*/
        struct proc		*p;	/* process requesting mount*/
{
	size_t		size;
	int		err = 0;
	struct vnode	*devvp;

	struct ufs_args args;
	struct ufsmount *ump = 0;
	register struct fs *fs;
	int error, flags;
	mode_t accessmode;
	int ronly = 0;

	/*
	 * NOTE(review): two distinct error variables are in play here:
	 * "err" is the goto-style exit status returned at the bottom,
	 * while "error" is only used by the two VOP_ACCESS() checks,
	 * which return directly instead of going through error_1/error_2.
	 */

	/*
	 * Use NULL path to flag a root mount
	 */
	if( path == NULL) {
		/*
		 ***
		 * Mounting root file system
		 ***
		 */

#ifdef SLICE
		/* SLICE kernels hand us the root device vnode directly. */
		rootvp = root_device_vnode;
		if (rootvp == NULL) {
			printf("ffs_mountroot: rootvp not set");
			return (EINVAL);
		}
#else	/* !SLICE */
		/* Obtain a vnode for the configured root block device. */
		if ((err = bdevvp(rootdev, &rootvp))) {
			printf("ffs_mountroot: can't find rootvp");
			return (err);
		}

		/* Honor driver requests to disable read/write clustering. */
		if (bdevsw[major(rootdev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(rootdev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;
#endif	/* !SLICE */
		if( ( err = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0) {
			/* fs specific cleanup (if any)*/
			goto error_1;
		}

		goto dostatfs;		/* success*/

	}

	/*
	 ***
	 * Mounting non-root file system or updating a file system
	 ***
	 */

	/* copy in user arguments*/
	err = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (err)
		goto error_1;		/* can't get arguments*/

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 * Disallow clearing MNT_NOCLUSTERR and MNT_NOCLUSTERW flags,
	 * if block device requests.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		err = 0;
		ronly = fs->fs_ronly;	/* MNT_RELOAD might change this */
		if (bdevsw[major(ump->um_dev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(ump->um_dev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;
		/* Downgrading rw -> ro: flush all dirty state first. */
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP) {
				err = softdep_flushfiles(mp, flags, p);
			} else {
				err = ffs_flushfiles(mp, flags, p);
			}
		}
		if (!err && (mp->mnt_flag & MNT_RELOAD))
			err = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (err) {
			goto error_1;
		}
		/* Upgrading ro -> rw: insist on a clean fs unless forced. */
		if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			if (!fs->fs_clean) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf("WARNING: %s was not properly dismounted.\n",fs->fs_fsmnt);
				} else {
					printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck.\n",
					    fs->fs_fsmnt);
					err = EPERM;
					goto error_1;
				}
			}

			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
				/* NOTE(review): returns directly, bypassing
				 * the error_1 path; no devvp reference was
				 * taken here so there is nothing to back out. */
				if (error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p)) {
					VOP_UNLOCK(devvp, 0, p);
					return (error);
				}
				VOP_UNLOCK(devvp, 0, p);
			}

			/* check to see if we need to start softdep */
			if (fs->fs_flags & FS_DOSOFTDEP) {
				err = softdep_mount(devvp, mp, fs, p->p_ucred);
				if (err)
					goto error_1;
			}

			/* Mark the upgrade so the superblock is flushed below. */
			ronly = 0;
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (mp->mnt_flag & MNT_SOFTDEP) {
			mp->mnt_flag &= ~MNT_ASYNC;
		}
		/* if not updating name...*/
		if (args.fspec == 0) {
			/*
			 * Process export requests.  Jumping to "success"
			 * will return the vfs_export() error code.
			 */
			err = vfs_export(mp, &ump->um_export, &args.export);
			goto success;
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	err = namei(ndp);
	if (err) {
		/* can't get devvp!*/
		goto error_1;
	}

	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		err = ENOTBLK;
		goto error_2;
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		err = ENXIO;
		goto error_2;
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
		/* NOTE(review): vput() here both unlocks and releases the
		 * namei() reference before the direct return. */
		if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) {
			vput(devvp);
			return (error);
		}
		VOP_UNLOCK(devvp, 0, p);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 ********************
		 * UPDATE
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 ********************
		 */

		if (devvp != ump->um_devvp) {
			if ( devvp->v_rdev == ump->um_devvp->v_rdev) {
				vrele(devvp);
			} else {
				err = EINVAL;	/* needs translation */
			}
		} else
			vrele(devvp);
		/*
		 * Update device name only on success
		 */
		if( !err) {
			/* Save "mounted from" info for mount point (NULL pad)*/
			/* NOTE(review): copyinstr() return value is ignored;
			 * TODO confirm "size" is always set on its failure
			 * before the bzero() below uses it. */
			copyinstr(	args.fspec,
					mp->mnt_stat.f_mntfromname,
					MNAMELEN - 1,
					&size);
			bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
		}
	} else {
		/*
		 ********************
		 * NEW MOUNT
		 ********************
		 */

		/* Honor driver requests to disable read/write clustering. */
		if (bdevsw[major(devvp->v_rdev)]->d_flags & D_NOCLUSTERR)
			mp->mnt_flag |= MNT_NOCLUSTERR;
		if (bdevsw[major(devvp->v_rdev)]->d_flags & D_NOCLUSTERW)
			mp->mnt_flag |= MNT_NOCLUSTERW;

		/*
		 * Since this is a new mount, we want the names for
		 * the device and the mount point copied in.  If an
		 * error occurs,  the mountpoint is discarded by the
		 * upper level code.
		 */
		/* Save "last mounted on" info for mount point (NULL pad)*/
		/* NOTE(review): copyinstr() return values ignored here as
		 * well -- see the update path above. */
		copyinstr(	path,				/* mount point*/
				mp->mnt_stat.f_mntonname,	/* save area*/
				MNAMELEN - 1,			/* max size*/
				&size);				/* real size*/
		bzero( mp->mnt_stat.f_mntonname + size, MNAMELEN - size);

		/* Save "mounted from" info for mount point (NULL pad)*/
		copyinstr(	args.fspec,			/* device name*/
				mp->mnt_stat.f_mntfromname,	/* save area*/
				MNAMELEN - 1,			/* max size*/
				&size);				/* real size*/
		bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);

		err = ffs_mountfs(devvp, mp, p, M_FFSNODE);
	}
	if (err) {
		goto error_2;
	}

dostatfs:
	/*
	 * Initialize FS stat information in mount struct; uses both
	 * mp->mnt_stat.f_mntonname and mp->mnt_stat.f_mntfromname
	 *
	 * This code is common to root and non-root mounts
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, p);

	goto success;


error_2:	/* error with devvp held*/

	/* release devvp before failing*/
	vrele(devvp);

error_1:	/* no state to back out*/

success:
	if (!err && path && (mp->mnt_flag & MNT_UPDATE)) {
		/* update superblock after ro -> rw update */
		fs = ump->um_fs;
		if (!ronly && fs->fs_ronly) {
			fs->fs_ronly = 0;
			fs->fs_clean = 0;
			ffs_sbupdate(ump, MNT_WAIT);
		}
	}
	return (err);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
static int
ffs_reload(mp, cred, p)
	register struct mount *mp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct csum *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	dev_t dev;
	int i, blks, size, error;
	int32_t *lp;

	/* Reload is only meaningful for a read-only mount. */
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		/* A read-only fs should never have dirty device buffers. */
		panic("ffs_reload: dirty1");

	dev = devvp->v_rdev;
	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  This excludes the original MFS implementation.
	 * Note that it is optional that the backing device be VMIOed.  This
	 * increases the opportunity for metadata caching.
	 */
	if ((devvp->v_type == VBLK) && (major(dev) < nblkdev)) {
		/* NOTE(review): no matching simple_unlock() is visible here;
		 * presumably vfs_object_create() consumes the interlock --
		 * confirm against its definition. */
		simple_lock(&devvp->v_interlock);
		vfs_object_create(devvp, p, p->p_ucred, 0);
	}

	/*
	 * Step 2: re-read superblock from disk.
	 */
	/* Determine the device sector size; fall back to DEV_BSIZE. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp))
		return (error);
	newfs = (struct fs *)bp->b_data;
	/* Sanity-check the on-disk superblock before trusting it. */
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mp)->um_fs;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	/* Walk the per-mount vnode list; restart from "loop" whenever the
	 * list may have changed underneath us. */
	simple_lock(&mntvnode_slock);
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
	 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		/* Overwrite the in-core dinode with the on-disk copy. */
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p, malloctype)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
	struct malloc_type *malloctype;
{
	register struct ufsmount *ump;
	struct buf *bp;
	register struct fs *fs;
	struct cg *cgp;
	dev_t dev;
	struct partinfo dpart;
	struct csum cstotal;
	caddr_t base, space;
	int error, i, cyl, blks, size, ronly;
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;					/* XXX */
	size_t strsize;
	int ncount;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	error = vfs_mountedon(devvp);
	if (error)
		return (error);
	ncount = vcount(devvp);

	if (ncount > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0, p);
	if (error)
		return (error);

	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.  This excludes the original MFS implementation.
	 * Note that it is optional that the backing device be VMIOed.  This
	 * increases the opportunity for metadata caching.
	 */
	if ((devvp->v_type == VBLK) && (major(dev) < nblkdev)) {
		/* NOTE(review): interlock taken without a visible unlock;
		 * presumably released by vfs_object_create() -- confirm. */
		simple_lock(&devvp->v_interlock);
		vfs_object_create(devvp, p, p->p_ucred, 0);
	}


	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);

	/* Determine the device sector size; fall back to DEV_BSIZE. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	/* Read and validate the superblock. */
	if (error = bread(devvp, SBLOCK, SBSIZE, cred, &bp))
		goto out;
	fs = (struct fs *)bp->b_data;
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	fs->fs_fmod = 0;
	/* An unclean fs may be mounted ro, or rw only with MNT_FORCE. */
	if (!fs->fs_clean) {
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s was not properly dismounted.\n",fs->fs_fsmnt);
		} else {
			printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck.\n",fs->fs_fsmnt);
			error = EPERM;
			goto out;
		}
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;          /* needs translation */
		goto out;
	}
	/* Allocate the per-mount structure and a private superblock copy. */
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	bzero((caddr_t)ump, sizeof *ump);
	ump->um_malloctype = malloctype;
	ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
	    M_WAITOK);
	/* Wire up the FFS implementations of the UFS operations vector. */
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs = ump->um_fs;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
	}
	/*
	 * Read in the cylinder-group summary information into one arena;
	 * when clustering is enabled the per-cg max-cluster array is
	 * appended to the same allocation.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp)) {
			free(base, M_UFSMNT);
			goto out;
		}
		bcopy(bp->b_data, space, (u_int)size);
		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	/* Publish the mount: fsid, flags, and back-pointers. */
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	if (fs->fs_id[0] != 0 && fs->fs_id[1] != 0)
		mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	else
		mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
	ffs_oldfscompat(fs);

	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	copystr(	mp->mnt_stat.f_mntonname,	/* mount point*/
			fs->fs_fsmnt,			/* copy area*/
			sizeof(fs->fs_fsmnt) - 1,	/* max size*/
			&strsize);			/* real size*/
	bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

	if( mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * this will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	/* Clamp fs_maxfilesize to what the triple-indirect scheme allows. */
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0) {
		/* Start soft updates if the fs requests it, then mark the
		 * fs dirty on disk so an unclean shutdown is detectable. */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			free(base, M_UFSMNT);
			goto out;
		}
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	return (0);
out:
	/* Error path: undo the device open and free per-mount state. */
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump->um_fs, M_UFSMNT);
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
static int
ffs_oldfscompat(fs)
	struct fs *fs;
{

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
#if 0
		int i;						/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
#endif
		/* Pre-4.4 inodes: use a fixed 512GB - 1 limit instead of
		 * the computed one above (disabled with #if 0). */
		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
	/* Flush all files (and dependencies, if soft updates is active). */
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0) {
		/* Mark the fs clean on disk; back out on write failure. */
		fs->fs_clean = 1;
		error = ffs_sbupdate(ump, MNT_WAIT);
		if (error) {
			fs->fs_clean = 0;
			return (error);
		}
	}
	ump->um_devvp->v_specmountpoint = NULL;

	vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);

	vrele(ump->um_devvp);

	/* fs_csp[0] is the base of the single summary-info allocation. */
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	register struct ufsmount *ump;
	int error;

	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, NULLVP, SKIPSYSTEM|flags);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	if ((error = vflush(mp, NULL, flags)) != 0)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p);
	VOP_UNLOCK(ump->um_devvp, 0, p);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
	/* Report sizes in fragment units; free blocks are converted from
	 * whole-block counts plus leftover fragments. */
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = freespace(fs, fs->fs_minfree);
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		sbp->f_type = mp->mnt_vfc->vfc_typenum;
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
			(caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
			(caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *nvp, *vp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	struct timeval tv;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("ffs_sync: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		/* Skip vnodes with nothing to write back. */
		if ((vp->v_type == VNON) || ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) &&
		    ((vp->v_dirtyblkhd.lh_first == NULL) || (waitfor == MNT_LAZY))) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		if (vp->v_type != VCHR) {
			simple_unlock(&mntvnode_slock);
			error =
			    vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
			if (error) {
				simple_lock(&mntvnode_slock);
				if (error == ENOENT)
					goto loop;
				continue;
			}
			/* Assignment-in-condition is intentional (K&R style). */
			if (error = VOP_FSYNC(vp, cred, waitfor, p))
				allerror = error;
			VOP_UNLOCK(vp, 0, p);
			vrele(vp);
			simple_lock(&mntvnode_slock);
		} else {
			/* Device vnode: just push the timestamps. */
			simple_unlock(&mntvnode_slock);
			simple_unlock(&vp->v_interlock);
			getmicrotime(&tv);
			/* UFS_UPDATE(vp, &tv, &tv, waitfor == MNT_WAIT); */
			UFS_UPDATE(vp, &tv, &tv, 0);
			simple_lock(&mntvnode_slock);
		}
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		/* Soft updates handles its own ordering; don't wait. */
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p);
		if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0, p);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
/*
 * Serializes inserts into the inode hash table: 0 = free, 1 = held,
 * -1 = held with waiters sleeping on its address.
 */
static int ffs_inode_hash_lock;

int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
restart:
	/* Fast path: inode already in core. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
		return (0);
	}

	/*
	 * Lock out the creation of new entries in the FFS hash table in
	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
	 * may occur!
	 */
	if (ffs_inode_hash_lock) {
		while (ffs_inode_hash_lock) {
			/* Record that a waiter exists, then sleep. */
			ffs_inode_hash_lock = -1;
			tsleep(&ffs_inode_hash_lock, PVM, "ffsvgt", 0);
		}
		/* The holder may have created our inode; look again. */
		goto restart;
	}
	ffs_inode_hash_lock = 1;

	/*
	 * If this MALLOC() is performed after the getnewvnode()
	 * it might block, leaving a vnode with a NULL v_data to be
	 * found by ffs_sync() if a sync happens to fire right then,
	 * which will cause a panic because ffs_sync() blindly
	 * dereferences vp->v_data (as well it should).
	 */
	MALLOC(ip, struct inode *, sizeof(struct inode),
	    ump->um_malloctype, M_WAITOK);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
	if (error) {
		/* Drop the hash lock, waking any sleepers. */
		if (ffs_inode_hash_lock < 0)
			wakeup(&ffs_inode_hash_lock);
		ffs_inode_hash_lock = 0;
		*vpp = NULL;
		FREE(ip, ump->um_malloctype);
		return (error);
	}
	bzero((caddr_t)ip, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/* Hash insert done; release the creation lock. */
	if (ffs_inode_hash_lock < 0)
		wakeup(&ffs_inode_hash_lock);
	ffs_inode_hash_lock = 0;

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() / 2 + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via.
 * exflagsp and credanonp.
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct sockaddr *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	/*
	 * An inode number below the root or beyond the last inode
	 * (cylinder groups * inodes per group) cannot exist here.
	 */
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 *
 * Fill in *fhp with a file handle (inode number plus generation count)
 * identifying the file behind vp.  Always succeeds.
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}

/*
 * Initialize the filesystem; just use ufs_init.
 * The soft updates code is set up first, then the rest is
 * delegated to ufs_init().
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Write a superblock and associated information back to disk.
 *
 * If waitfor is MNT_WAIT the writes are synchronous and the first
 * error encountered is returned; otherwise the writes are issued
 * asynchronously via bawrite().  The superblock itself is only
 * written if all of the summary information wrote back cleanly.
 */
static int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *dfs, *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error, allerror = 0;

	/*
	 * First write back the summary information.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		/* The last block may be shorter than a full fs block. */
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
		bcopy(space, bp->b_data, (u_int)size);
		space += size;
		if (waitfor != MNT_WAIT)
			bawrite(bp);
		else if (error = bwrite(bp))
			allerror = error;
	}
	/*
	 * Now write back the superblock itself. If any errors occurred
	 * up to this point, then fail so that the superblock avoids
	 * being written out as clean.
	 */
	if (allerror)
		return (allerror);
	bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
	fs->fs_fmod = 0;
	fs->fs_time = time_second;
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	/* Restore compatibility to old file systems.		   XXX */
	/* (Patch only the on-disk copy in the buffer, not *fs.)   XXX */
	dfs = (struct fs *)bp->b_data;				/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		dfs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		/* Rotate the five words at fs_qbmask back	   XXX */
		/* into their pre-4.4 on-disk order.		   XXX */
		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	dfs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */
	if (waitfor != MNT_WAIT)
		bawrite(bp);
	else if (error = bwrite(bp))
		allerror = error;
	return (allerror);
}