/* ffs_vfsops.c revision 71999 */
1/* 2 * Copyright (c) 1989, 1991, 1993, 1994 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by the University of 16 * California, Berkeley and its contributors. 17 * 4. Neither the name of the University nor the names of its contributors 18 * may be used to endorse or promote products derived from this software 19 * without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 
32 * 33 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 34 * $FreeBSD: head/sys/ufs/ffs/ffs_vfsops.c 71999 2001-02-04 13:13:25Z phk $ 35 */ 36 37#include "opt_ffs.h" 38#include "opt_quota.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/namei.h> 43#include <sys/proc.h> 44#include <sys/kernel.h> 45#include <sys/vnode.h> 46#include <sys/mount.h> 47#include <sys/bio.h> 48#include <sys/buf.h> 49#include <sys/conf.h> 50#include <sys/fcntl.h> 51#include <sys/disklabel.h> 52#include <sys/malloc.h> 53#include <sys/mutex.h> 54 55#include <ufs/ufs/extattr.h> 56#include <ufs/ufs/quota.h> 57#include <ufs/ufs/ufsmount.h> 58#include <ufs/ufs/inode.h> 59#include <ufs/ufs/ufs_extern.h> 60 61#include <ufs/ffs/fs.h> 62#include <ufs/ffs/ffs_extern.h> 63 64#include <vm/vm.h> 65#include <vm/vm_page.h> 66 67static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part"); 68 69static int ffs_sbupdate __P((struct ufsmount *, int)); 70int ffs_reload __P((struct mount *,struct ucred *,struct proc *)); 71static int ffs_oldfscompat __P((struct fs *)); 72static int ffs_init __P((struct vfsconf *)); 73 74static struct vfsops ufs_vfsops = { 75 ffs_mount, 76 ufs_start, 77 ffs_unmount, 78 ufs_root, 79 ufs_quotactl, 80 ffs_statfs, 81 ffs_sync, 82 ffs_vget, 83 ffs_fhtovp, 84 ufs_check_export, 85 ffs_vptofh, 86 ffs_init, 87 vfs_stduninit, 88#ifdef FFS_EXTATTR 89 ufs_extattrctl, 90#else 91 vfs_stdextattrctl, 92#endif 93}; 94 95VFS_SET(ufs_vfsops, ufs, 0); 96 97/* 98 * ffs_mount 99 * 100 * Called when mounting local physical media 101 * 102 * PARAMETERS: 103 * mountroot 104 * mp mount point structure 105 * path NULL (flag for root mount!!!) 106 * data <unused> 107 * ndp <unused> 108 * p process (user credentials check [statfs]) 109 * 110 * mount 111 * mp mount point structure 112 * path path to mount point 113 * data pointer to argument struct in user space 114 * ndp mount point namei() return (used for 115 * credentials on reload), reused to look 116 * up block device. 
117 * p process (user credentials check) 118 * 119 * RETURNS: 0 Success 120 * !0 error number (errno.h) 121 * 122 * LOCK STATE: 123 * 124 * ENTRY 125 * mount point is locked 126 * EXIT 127 * mount point is locked 128 * 129 * NOTES: 130 * A NULL path can be used for a flag since the mount 131 * system call will fail with EFAULT in copyinstr in 132 * namei() if it is a genuine NULL from the user. 133 */ 134int 135ffs_mount(mp, path, data, ndp, p) 136 struct mount *mp; /* mount struct pointer*/ 137 char *path; /* path to mount point*/ 138 caddr_t data; /* arguments to FS specific mount*/ 139 struct nameidata *ndp; /* mount point credentials*/ 140 struct proc *p; /* process requesting mount*/ 141{ 142 size_t size; 143 struct vnode *devvp; 144 struct ufs_args args; 145 struct ufsmount *ump = 0; 146 register struct fs *fs; 147 int error, flags; 148 mode_t accessmode; 149 150 /* 151 * Use NULL path to indicate we are mounting the root file system. 152 */ 153 if (path == NULL) { 154 if ((error = bdevvp(rootdev, &rootvp))) { 155 printf("ffs_mountroot: can't find rootvp\n"); 156 return (error); 157 } 158 159 if ((error = ffs_mountfs(rootvp, mp, p, M_FFSNODE)) != 0) 160 return (error); 161 162 (void)VFS_STATFS(mp, &mp->mnt_stat, p); 163 return (0); 164 } 165 166 /* 167 * Mounting non-root file system or updating a file system 168 */ 169 if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args)))!= 0) 170 return (error); 171 172 /* 173 * If updating, check whether changing from read-only to 174 * read/write; if there is no device name, that's all we do. 
175 */ 176 if (mp->mnt_flag & MNT_UPDATE) { 177 ump = VFSTOUFS(mp); 178 fs = ump->um_fs; 179 devvp = ump->um_devvp; 180 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) { 181 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 182 return (error); 183 flags = WRITECLOSE; 184 if (mp->mnt_flag & MNT_FORCE) 185 flags |= FORCECLOSE; 186 if (mp->mnt_flag & MNT_SOFTDEP) { 187 error = softdep_flushfiles(mp, flags, p); 188 } else { 189 error = ffs_flushfiles(mp, flags, p); 190 } 191 if (error) { 192 vn_finished_write(mp); 193 return (error); 194 } 195 fs->fs_ronly = 1; 196 if ((fs->fs_flags & FS_UNCLEAN) == 0) 197 fs->fs_clean = 1; 198 if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) { 199 fs->fs_ronly = 0; 200 fs->fs_clean = 0; 201 vn_finished_write(mp); 202 return (error); 203 } 204 vn_finished_write(mp); 205 } 206 if ((mp->mnt_flag & MNT_RELOAD) && 207 (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p)) != 0) 208 return (error); 209 if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { 210 /* 211 * If upgrade to read-write by non-root, then verify 212 * that user has necessary permissions on the device. 213 */ 214 if (p->p_ucred->cr_uid != 0) { 215 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 216 if ((error = VOP_ACCESS(devvp, VREAD | VWRITE, 217 p->p_ucred, p)) != 0) { 218 VOP_UNLOCK(devvp, 0, p); 219 return (error); 220 } 221 VOP_UNLOCK(devvp, 0, p); 222 } 223 fs->fs_flags &= ~FS_UNCLEAN; 224 if (fs->fs_clean == 0) { 225 fs->fs_flags |= FS_UNCLEAN; 226 if (mp->mnt_flag & MNT_FORCE) { 227 printf("WARNING: %s was not %s\n", 228 fs->fs_fsmnt, "properly dismounted"); 229 } else { 230 printf( 231"WARNING: R/W mount of %s denied. 
Filesystem is not clean - run fsck\n", 232 fs->fs_fsmnt); 233 return (EPERM); 234 } 235 } 236 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 237 return (error); 238 fs->fs_ronly = 0; 239 fs->fs_clean = 0; 240 if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) { 241 vn_finished_write(mp); 242 return (error); 243 } 244 /* check to see if we need to start softdep */ 245 if ((fs->fs_flags & FS_DOSOFTDEP) && 246 (error = softdep_mount(devvp, mp, fs, p->p_ucred))){ 247 vn_finished_write(mp); 248 return (error); 249 } 250 if (fs->fs_snapinum[0] != 0) 251 ffs_snapshot_mount(mp); 252 vn_finished_write(mp); 253 } 254 /* 255 * Soft updates is incompatible with "async", 256 * so if we are doing softupdates stop the user 257 * from setting the async flag in an update. 258 * Softdep_mount() clears it in an initial mount 259 * or ro->rw remount. 260 */ 261 if (mp->mnt_flag & MNT_SOFTDEP) 262 mp->mnt_flag &= ~MNT_ASYNC; 263 /* 264 * If not updating name, process export requests. 265 */ 266 if (args.fspec == 0) 267 return (vfs_export(mp, &ump->um_export, &args.export)); 268 /* 269 * If this is a snapshot request, take the snapshot. 270 */ 271 if (mp->mnt_flag & MNT_SNAPSHOT) 272 return (ffs_snapshot(mp, args.fspec)); 273 } 274 275 /* 276 * Not an update, or updating the name: look up the name 277 * and verify that it refers to a sensible block device. 278 */ 279 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); 280 if ((error = namei(ndp)) != 0) 281 return (error); 282 NDFREE(ndp, NDF_ONLY_PNBUF); 283 devvp = ndp->ni_vp; 284 if (!vn_isdisk(devvp, &error)) { 285 vrele(devvp); 286 return (error); 287 } 288 289 /* 290 * If mount by non-root, then verify that user has necessary 291 * permissions on the device. 
292 */ 293 if (p->p_ucred->cr_uid != 0) { 294 accessmode = VREAD; 295 if ((mp->mnt_flag & MNT_RDONLY) == 0) 296 accessmode |= VWRITE; 297 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 298 if ((error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))!= 0){ 299 vput(devvp); 300 return (error); 301 } 302 VOP_UNLOCK(devvp, 0, p); 303 } 304 305 if (mp->mnt_flag & MNT_UPDATE) { 306 /* 307 * Update only 308 * 309 * If it's not the same vnode, or at least the same device 310 * then it's not correct. 311 */ 312 313 if (devvp != ump->um_devvp && 314 devvp->v_rdev != ump->um_devvp->v_rdev) 315 error = EINVAL; /* needs translation */ 316 vrele(devvp); 317 if (error) 318 return (error); 319 } else { 320 /* 321 * New mount 322 * 323 * We need the name for the mount point (also used for 324 * "last mounted on") copied in. If an error occurs, 325 * the mount point is discarded by the upper level code. 326 */ 327 copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); 328 bzero( mp->mnt_stat.f_mntonname + size, MNAMELEN - size); 329 if ((error = ffs_mountfs(devvp, mp, p, M_FFSNODE)) != 0) { 330 vrele(devvp); 331 return (error); 332 } 333 } 334 /* 335 * Save "mounted from" device name info for mount point (NULL pad). 336 */ 337 copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); 338 bzero( mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); 339 /* 340 * Initialize filesystem stat information in mount struct. 341 */ 342 (void)VFS_STATFS(mp, &mp->mnt_stat, p); 343 return (0); 344} 345 346/* 347 * Reload all incore data for a filesystem (used after running fsck on 348 * the root filesystem and finding things to fix). The filesystem must 349 * be mounted read-only. 350 * 351 * Things to do to update the mount: 352 * 1) invalidate all cached meta-data. 353 * 2) re-read superblock from disk. 354 * 3) re-read summary information from disk. 355 * 4) invalidate all inactive vnodes. 356 * 5) invalidate all cached file data. 
357 * 6) re-read inode data for all active vnodes. 358 */ 359int 360ffs_reload(mp, cred, p) 361 register struct mount *mp; 362 struct ucred *cred; 363 struct proc *p; 364{ 365 register struct vnode *vp, *nvp, *devvp; 366 struct inode *ip; 367 void *space; 368 struct buf *bp; 369 struct fs *fs, *newfs; 370 struct partinfo dpart; 371 dev_t dev; 372 int i, blks, size, error; 373 int32_t *lp; 374 375 if ((mp->mnt_flag & MNT_RDONLY) == 0) 376 return (EINVAL); 377 /* 378 * Step 1: invalidate all cached meta-data. 379 */ 380 devvp = VFSTOUFS(mp)->um_devvp; 381 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 382 error = vinvalbuf(devvp, 0, cred, p, 0, 0); 383 VOP_UNLOCK(devvp, 0, p); 384 if (error) 385 panic("ffs_reload: dirty1"); 386 387 dev = devvp->v_rdev; 388 389 /* 390 * Only VMIO the backing device if the backing device is a real 391 * block device. See ffs_mountmfs() for more details. 392 */ 393 if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) { 394 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 395 vfs_object_create(devvp, p, p->p_ucred); 396 mtx_enter(&devvp->v_interlock, MTX_DEF); 397 VOP_UNLOCK(devvp, LK_INTERLOCK, p); 398 } 399 400 /* 401 * Step 2: re-read superblock from disk. 402 */ 403 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0) 404 size = DEV_BSIZE; 405 else 406 size = dpart.disklab->d_secsize; 407 if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) != 0) 408 return (error); 409 newfs = (struct fs *)bp->b_data; 410 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE || 411 newfs->fs_bsize < sizeof(struct fs)) { 412 brelse(bp); 413 return (EIO); /* XXX needs translation */ 414 } 415 fs = VFSTOUFS(mp)->um_fs; 416 /* 417 * Copy pointer fields back into superblock before copying in XXX 418 * new superblock. These should really be in the ufsmount. XXX 419 * Note that important parameters (eg fs_ncg) are unchanged. 
420 */ 421 newfs->fs_csp = fs->fs_csp; 422 newfs->fs_maxcluster = fs->fs_maxcluster; 423 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 424 if (fs->fs_sbsize < SBSIZE) 425 bp->b_flags |= B_INVAL | B_NOCACHE; 426 brelse(bp); 427 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 428 ffs_oldfscompat(fs); 429 430 /* 431 * Step 3: re-read summary information from disk. 432 */ 433 blks = howmany(fs->fs_cssize, fs->fs_fsize); 434 space = fs->fs_csp; 435 for (i = 0; i < blks; i += fs->fs_frag) { 436 size = fs->fs_bsize; 437 if (i + fs->fs_frag > blks) 438 size = (blks - i) * fs->fs_fsize; 439 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 440 NOCRED, &bp); 441 if (error) 442 return (error); 443 bcopy(bp->b_data, space, (u_int)size); 444 space = (char *)space + size; 445 brelse(bp); 446 } 447 /* 448 * We no longer know anything about clusters per cylinder group. 449 */ 450 if (fs->fs_contigsumsize > 0) { 451 lp = fs->fs_maxcluster; 452 for (i = 0; i < fs->fs_ncg; i++) 453 *lp++ = fs->fs_contigsumsize; 454 } 455 456loop: 457 mtx_enter(&mntvnode_mtx, MTX_DEF); 458 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { 459 if (vp->v_mount != mp) { 460 mtx_exit(&mntvnode_mtx, MTX_DEF); 461 goto loop; 462 } 463 nvp = LIST_NEXT(vp, v_mntvnodes); 464 /* 465 * Step 4: invalidate all inactive vnodes. 466 */ 467 if (vrecycle(vp, &mntvnode_mtx, p)) 468 goto loop; 469 /* 470 * Step 5: invalidate all cached file data. 471 */ 472 mtx_enter(&vp->v_interlock, MTX_DEF); 473 mtx_exit(&mntvnode_mtx, MTX_DEF); 474 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) { 475 goto loop; 476 } 477 if (vinvalbuf(vp, 0, cred, p, 0, 0)) 478 panic("ffs_reload: dirty2"); 479 /* 480 * Step 6: re-read inode data for all active vnodes. 
481 */ 482 ip = VTOI(vp); 483 error = 484 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 485 (int)fs->fs_bsize, NOCRED, &bp); 486 if (error) { 487 vput(vp); 488 return (error); 489 } 490 ip->i_din = *((struct dinode *)bp->b_data + 491 ino_to_fsbo(fs, ip->i_number)); 492 ip->i_effnlink = ip->i_nlink; 493 brelse(bp); 494 vput(vp); 495 mtx_enter(&mntvnode_mtx, MTX_DEF); 496 } 497 mtx_exit(&mntvnode_mtx, MTX_DEF); 498 return (0); 499} 500 501/* 502 * Common code for mount and mountroot 503 */ 504int 505ffs_mountfs(devvp, mp, p, malloctype) 506 register struct vnode *devvp; 507 struct mount *mp; 508 struct proc *p; 509 struct malloc_type *malloctype; 510{ 511 register struct ufsmount *ump; 512 struct buf *bp; 513 register struct fs *fs; 514 dev_t dev; 515 struct partinfo dpart; 516 void *space; 517 int error, i, blks, size, ronly; 518 int32_t *lp; 519 struct ucred *cred; 520 u_int64_t maxfilesize; /* XXX */ 521 size_t strsize; 522 int ncount; 523 524 dev = devvp->v_rdev; 525 cred = p ? p->p_ucred : NOCRED; 526 /* 527 * Disallow multiple mounts of the same device. 528 * Disallow mounting of a device that is currently in use 529 * (except for root, which might share swap device for miniroot). 530 * Flush out any old buffers remaining from a previous use. 531 */ 532 error = vfs_mountedon(devvp); 533 if (error) 534 return (error); 535 ncount = vcount(devvp); 536 537 if (ncount > 1 && devvp != rootvp) 538 return (EBUSY); 539 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 540 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0); 541 VOP_UNLOCK(devvp, 0, p); 542 if (error) 543 return (error); 544 545 /* 546 * Only VMIO the backing device if the backing device is a real 547 * block device. This excludes the original MFS implementation. 548 * Note that it is optional that the backing device be VMIOed. This 549 * increases the opportunity for metadata caching. 
550 */ 551 if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) { 552 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 553 vfs_object_create(devvp, p, cred); 554 mtx_enter(&devvp->v_interlock, MTX_DEF); 555 VOP_UNLOCK(devvp, LK_INTERLOCK, p); 556 } 557 558 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 559 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); 560 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p); 561 VOP_UNLOCK(devvp, 0, p); 562 if (error) 563 return (error); 564 if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max) 565 mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max; 566 if (mp->mnt_iosize_max > MAXPHYS) 567 mp->mnt_iosize_max = MAXPHYS; 568 569 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0) 570 size = DEV_BSIZE; 571 else 572 size = dpart.disklab->d_secsize; 573 574 bp = NULL; 575 ump = NULL; 576 if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0) 577 goto out; 578 fs = (struct fs *)bp->b_data; 579 if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || 580 fs->fs_bsize < sizeof(struct fs)) { 581 error = EINVAL; /* XXX needs translation */ 582 goto out; 583 } 584 fs->fs_fmod = 0; 585 fs->fs_flags &= ~FS_UNCLEAN; 586 if (fs->fs_clean == 0) { 587 fs->fs_flags |= FS_UNCLEAN; 588 if (ronly || (mp->mnt_flag & MNT_FORCE)) { 589 printf( 590"WARNING: %s was not properly dismounted\n", 591 fs->fs_fsmnt); 592 } else { 593 printf( 594"WARNING: R/W mount of %s denied. 
Filesystem is not clean - run fsck\n", 595 fs->fs_fsmnt); 596 error = EPERM; 597 goto out; 598 } 599 } 600 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */ 601 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) { 602 error = EROFS; /* needs translation */ 603 goto out; 604 } 605 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 606 ump->um_malloctype = malloctype; 607 ump->um_i_effnlink_valid = 1; 608 ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, 609 M_WAITOK); 610 ump->um_blkatoff = ffs_blkatoff; 611 ump->um_truncate = ffs_truncate; 612 ump->um_update = ffs_update; 613 ump->um_valloc = ffs_valloc; 614 ump->um_vfree = ffs_vfree; 615 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize); 616 if (fs->fs_sbsize < SBSIZE) 617 bp->b_flags |= B_INVAL | B_NOCACHE; 618 brelse(bp); 619 bp = NULL; 620 fs = ump->um_fs; 621 fs->fs_ronly = ronly; 622 size = fs->fs_cssize; 623 blks = howmany(size, fs->fs_fsize); 624 if (fs->fs_contigsumsize > 0) 625 size += fs->fs_ncg * sizeof(int32_t); 626 space = malloc((u_long)size, M_UFSMNT, M_WAITOK); 627 fs->fs_csp = space; 628 for (i = 0; i < blks; i += fs->fs_frag) { 629 size = fs->fs_bsize; 630 if (i + fs->fs_frag > blks) 631 size = (blks - i) * fs->fs_fsize; 632 if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 633 cred, &bp)) != 0) { 634 free(fs->fs_csp, M_UFSMNT); 635 goto out; 636 } 637 bcopy(bp->b_data, space, (u_int)size); 638 space = (char *)space + size; 639 brelse(bp); 640 bp = NULL; 641 } 642 if (fs->fs_contigsumsize > 0) { 643 fs->fs_maxcluster = lp = space; 644 for (i = 0; i < fs->fs_ncg; i++) 645 *lp++ = fs->fs_contigsumsize; 646 } 647 mp->mnt_data = (qaddr_t)ump; 648 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 649 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 650 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 651 vfs_getvfs(&mp->mnt_stat.f_fsid)) 652 vfs_getnewfsid(mp); 653 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 654 mp->mnt_flag |= MNT_LOCAL; 655 ump->um_mountp = mp; 656 
ump->um_dev = dev; 657 ump->um_devvp = devvp; 658 ump->um_nindir = fs->fs_nindir; 659 ump->um_bptrtodb = fs->fs_fsbtodb; 660 ump->um_seqinc = fs->fs_frag; 661 for (i = 0; i < MAXQUOTAS; i++) 662 ump->um_quotas[i] = NULLVP; 663#ifdef FFS_EXTATTR 664 ufs_extattr_uepm_init(&ump->um_extattr); 665#endif 666 devvp->v_rdev->si_mountpoint = mp; 667 ffs_oldfscompat(fs); 668 669 /* 670 * Set FS local "last mounted on" information (NULL pad) 671 */ 672 copystr( mp->mnt_stat.f_mntonname, /* mount point*/ 673 fs->fs_fsmnt, /* copy area*/ 674 sizeof(fs->fs_fsmnt) - 1, /* max size*/ 675 &strsize); /* real size*/ 676 bzero( fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize); 677 678 if( mp->mnt_flag & MNT_ROOTFS) { 679 /* 680 * Root mount; update timestamp in mount structure. 681 * this will be used by the common root mount code 682 * to update the system clock. 683 */ 684 mp->mnt_time = fs->fs_time; 685 } 686 687 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */ 688 maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */ 689 if (fs->fs_maxfilesize > maxfilesize) /* XXX */ 690 fs->fs_maxfilesize = maxfilesize; /* XXX */ 691 if (ronly == 0) { 692 if ((fs->fs_flags & FS_DOSOFTDEP) && 693 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 694 free(fs->fs_csp, M_UFSMNT); 695 goto out; 696 } 697 if (fs->fs_snapinum[0] != 0) 698 ffs_snapshot_mount(mp); 699 fs->fs_fmod = 1; 700 fs->fs_clean = 0; 701 (void) ffs_sbupdate(ump, MNT_WAIT); 702 } 703#ifdef FFS_EXTATTR 704 /* 705 * XXX Auto-starting of EAs would go here. 706 * 707 * Auto-starting would: 708 * - check for /.attribute in the fs, and extattr_start if so 709 * - for each file in .attribute, enable that file with 710 * an attribute of the same name. 711 * Not clear how to report errors -- probably eat them. 712 * This would all happen while the file system was busy/not 713 * available, so would effectively be "atomic". 
714 */ 715 /* ufs_extattr_autostart(mp, ump); */ 716#endif 717 return (0); 718out: 719 devvp->v_rdev->si_mountpoint = NULL; 720 if (bp) 721 brelse(bp); 722 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p); 723 if (ump) { 724 free(ump->um_fs, M_UFSMNT); 725 free(ump, M_UFSMNT); 726 mp->mnt_data = (qaddr_t)0; 727 } 728 return (error); 729} 730 731/* 732 * Sanity checks for old file systems. 733 * 734 * XXX - goes away some day. 735 */ 736static int 737ffs_oldfscompat(fs) 738 struct fs *fs; 739{ 740 741 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */ 742 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */ 743 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */ 744 fs->fs_nrpos = 8; /* XXX */ 745 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ 746#if 0 747 int i; /* XXX */ 748 u_int64_t sizepb = fs->fs_bsize; /* XXX */ 749 /* XXX */ 750 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */ 751 for (i = 0; i < NIADDR; i++) { /* XXX */ 752 sizepb *= NINDIR(fs); /* XXX */ 753 fs->fs_maxfilesize += sizepb; /* XXX */ 754 } /* XXX */ 755#endif 756 fs->fs_maxfilesize = (u_quad_t) 1LL << 39; 757 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */ 758 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */ 759 } /* XXX */ 760 return (0); 761} 762 763/* 764 * unmount system call 765 */ 766int 767ffs_unmount(mp, mntflags, p) 768 struct mount *mp; 769 int mntflags; 770 struct proc *p; 771{ 772 register struct ufsmount *ump = VFSTOUFS(mp); 773 register struct fs *fs; 774 int error, flags; 775 776 flags = 0; 777 if (mntflags & MNT_FORCE) { 778 flags |= FORCECLOSE; 779 } 780#ifdef FFS_EXTATTR 781 if ((error = ufs_extattr_stop(mp, p))) 782 if (error != EOPNOTSUPP) 783 printf("ffs_unmount: ufs_extattr_stop returned %d\n", 784 error); 785 ufs_extattr_uepm_destroy(&ump->um_extattr); 786#endif 787 if (mp->mnt_flag & MNT_SOFTDEP) { 788 if ((error = softdep_flushfiles(mp, flags, p)) != 0) 789 return (error); 790 } else { 791 if ((error = ffs_flushfiles(mp, flags, p)) != 0) 792 
return (error); 793 } 794 fs = ump->um_fs; 795 if (fs->fs_ronly == 0) { 796 fs->fs_clean = fs->fs_flags & FS_UNCLEAN ? 0 : 1; 797 error = ffs_sbupdate(ump, MNT_WAIT); 798 if (error) { 799 fs->fs_clean = 0; 800 return (error); 801 } 802 } 803 ump->um_devvp->v_rdev->si_mountpoint = NULL; 804 805 vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, p, 0, 0); 806 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE, 807 NOCRED, p); 808 809 vrele(ump->um_devvp); 810 811 free(fs->fs_csp, M_UFSMNT); 812 free(fs, M_UFSMNT); 813 free(ump, M_UFSMNT); 814 mp->mnt_data = (qaddr_t)0; 815 mp->mnt_flag &= ~MNT_LOCAL; 816 return (error); 817} 818 819/* 820 * Flush out all the files in a filesystem. 821 */ 822int 823ffs_flushfiles(mp, flags, p) 824 register struct mount *mp; 825 int flags; 826 struct proc *p; 827{ 828 register struct ufsmount *ump; 829 int error; 830 831 ump = VFSTOUFS(mp); 832#ifdef QUOTA 833 if (mp->mnt_flag & MNT_QUOTA) { 834 int i; 835 error = vflush(mp, NULLVP, SKIPSYSTEM|flags); 836 if (error) 837 return (error); 838 for (i = 0; i < MAXQUOTAS; i++) { 839 if (ump->um_quotas[i] == NULLVP) 840 continue; 841 quotaoff(p, mp, i); 842 } 843 /* 844 * Here we fall through to vflush again to ensure 845 * that we have gotten rid of all the system vnodes. 846 */ 847 } 848#endif 849 if (ump->um_devvp->v_flag & VCOPYONWRITE) { 850 if ((error = vflush(mp, NULL, SKIPSYSTEM | flags)) != 0) 851 return (error); 852 ffs_snapshot_unmount(mp); 853 /* 854 * Here we fall through to vflush again to ensure 855 * that we have gotten rid of all the system vnodes. 856 */ 857 } 858 /* 859 * Flush all the files. 860 */ 861 if ((error = vflush(mp, NULL, flags)) != 0) 862 return (error); 863 /* 864 * Flush filesystem metadata. 865 */ 866 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); 867 error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p); 868 VOP_UNLOCK(ump->um_devvp, 0, p); 869 return (error); 870} 871 872/* 873 * Get file system statistics. 
874 */ 875int 876ffs_statfs(mp, sbp, p) 877 struct mount *mp; 878 register struct statfs *sbp; 879 struct proc *p; 880{ 881 register struct ufsmount *ump; 882 register struct fs *fs; 883 884 ump = VFSTOUFS(mp); 885 fs = ump->um_fs; 886 if (fs->fs_magic != FS_MAGIC) 887 panic("ffs_statfs"); 888 sbp->f_bsize = fs->fs_fsize; 889 sbp->f_iosize = fs->fs_bsize; 890 sbp->f_blocks = fs->fs_dsize; 891 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 892 fs->fs_cstotal.cs_nffree; 893 sbp->f_bavail = freespace(fs, fs->fs_minfree); 894 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO; 895 sbp->f_ffree = fs->fs_cstotal.cs_nifree; 896 if (sbp != &mp->mnt_stat) { 897 sbp->f_type = mp->mnt_vfc->vfc_typenum; 898 bcopy((caddr_t)mp->mnt_stat.f_mntonname, 899 (caddr_t)&sbp->f_mntonname[0], MNAMELEN); 900 bcopy((caddr_t)mp->mnt_stat.f_mntfromname, 901 (caddr_t)&sbp->f_mntfromname[0], MNAMELEN); 902 } 903 return (0); 904} 905 906/* 907 * Go through the disk queues to initiate sandbagged IO; 908 * go through the inodes to write those that have been modified; 909 * initiate the writing of the super block if it has been modified. 910 * 911 * Note: we are always called with the filesystem marked `MPBUSY'. 912 */ 913int 914ffs_sync(mp, waitfor, cred, p) 915 struct mount *mp; 916 int waitfor; 917 struct ucred *cred; 918 struct proc *p; 919{ 920 struct vnode *nvp, *vp; 921 struct inode *ip; 922 struct ufsmount *ump = VFSTOUFS(mp); 923 struct fs *fs; 924 int error, count, wait, lockreq, allerror = 0; 925 926 fs = ump->um_fs; 927 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */ 928 printf("fs = %s\n", fs->fs_fsmnt); 929 panic("ffs_sync: rofs mod"); 930 } 931 /* 932 * Write back each (modified) inode. 
933 */ 934 wait = 0; 935 lockreq = LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK; 936 if (waitfor == MNT_WAIT) { 937 wait = 1; 938 lockreq = LK_EXCLUSIVE | LK_INTERLOCK; 939 } 940 mtx_enter(&mntvnode_mtx, MTX_DEF); 941loop: 942 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { 943 /* 944 * If the vnode that we are about to sync is no longer 945 * associated with this mount point, start over. 946 */ 947 if (vp->v_mount != mp) 948 goto loop; 949 mtx_enter(&vp->v_interlock, MTX_DEF); 950 nvp = LIST_NEXT(vp, v_mntvnodes); 951 ip = VTOI(vp); 952 if (vp->v_type == VNON || ((ip->i_flag & 953 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 954 TAILQ_EMPTY(&vp->v_dirtyblkhd))) { 955 mtx_exit(&vp->v_interlock, MTX_DEF); 956 continue; 957 } 958 if (vp->v_type != VCHR) { 959 mtx_exit(&mntvnode_mtx, MTX_DEF); 960 if ((error = vget(vp, lockreq, p)) != 0) { 961 mtx_enter(&mntvnode_mtx, MTX_DEF); 962 if (error == ENOENT) 963 goto loop; 964 continue; 965 } 966 if ((error = VOP_FSYNC(vp, cred, waitfor, p)) != 0) 967 allerror = error; 968 VOP_UNLOCK(vp, 0, p); 969 vrele(vp); 970 mtx_enter(&mntvnode_mtx, MTX_DEF); 971 } else { 972 mtx_exit(&mntvnode_mtx, MTX_DEF); 973 mtx_exit(&vp->v_interlock, MTX_DEF); 974 UFS_UPDATE(vp, wait); 975 mtx_enter(&mntvnode_mtx, MTX_DEF); 976 } 977 } 978 mtx_exit(&mntvnode_mtx, MTX_DEF); 979 /* 980 * Force stale file system control information to be flushed. 981 */ 982 if (waitfor == MNT_WAIT) { 983 if ((error = softdep_flushworklist(ump->um_mountp, &count, p))) 984 allerror = error; 985 /* Flushed work items may create new vnodes to clean */ 986 if (count) { 987 mtx_enter(&mntvnode_mtx, MTX_DEF); 988 goto loop; 989 } 990 } 991 if (waitfor == MNT_NOWAIT) { 992 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); 993 if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p)) != 0) 994 allerror = error; 995 VOP_UNLOCK(ump->um_devvp, 0, p); 996 } 997#ifdef QUOTA 998 qsync(mp); 999#endif 1000 /* 1001 * Write back modified superblock. 
	 */
	if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk. If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount points must be
 * done by the calling routine.
 */
static int ffs_inode_hash_lock;
/*
 * ffs_inode_hash_lock provides mutual exclusion between vnode allocation
 * and insertion into the inode hash table, so that we never hold more
 * than one vnode for the same inode in the hash.  It must be
 * tested-and-set or cleared atomically, which is accomplished by doing
 * every access under ffs_inode_hash_mtx.
 *
 * Values: 0 = free, 1 = held, -1 = held with waiters (a waiter must be
 * woken with wakeup() when the lock is released).
 *
 * Because vnode allocation may block in MALLOC() and in the zone
 * allocator, we msleep() to give up the CPU while someone else is
 * allocating a vnode.  lockmgr is not suitable here because another
 * thread may insert the very vnode we are waiting for into the hash
 * table during our sleep, in which case the hash table has to be
 * examined again after waking up (hence the "goto restart" below).
 */
static struct mtx ffs_inode_hash_mtx;

/*
 * ffs_vget:
 *
 *	Return in *vpp a locked vnode for inode number "ino" on the
 *	filesystem mounted at "mp", reading the inode in from disk if it
 *	is not already in core.  Returns 0 on success; otherwise an errno
 *	value with *vpp set to NULL.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error, want_wakeup;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
restart:
	/* Fast path: the inode already has an incore vnode. */
	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
		return (0);
	}

	/*
	 * Lock out the creation of new entries in the FFS hash table in
	 * case getnewvnode() or MALLOC() blocks, otherwise a duplicate
	 * may occur!
	 */
	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
	if (ffs_inode_hash_lock) {
		/*
		 * Someone else is allocating; mark the lock as contended
		 * (-1) so the holder knows to wakeup(), then sleep with
		 * the mutex atomically released by msleep().
		 */
		while (ffs_inode_hash_lock) {
			ffs_inode_hash_lock = -1;
			msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
		}
		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
		/* The winner may have inserted our inode; look again. */
		goto restart;
	}
	ffs_inode_hash_lock = 1;
	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);

	/*
	 * If this MALLOC() is performed after the getnewvnode()
	 * it might block, leaving a vnode with a NULL v_data to be
	 * found by ffs_sync() if a sync happens to fire right then,
	 * which will cause a panic because ffs_sync() blindly
	 * dereferences vp->v_data (as well it should).
	 */
	MALLOC(ip, struct inode *, sizeof(struct inode),
	    ump->um_malloctype, M_WAITOK);

	/* Allocate a new vnode/inode. */
	error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
	if (error) {
		/*
		 * Do not wake up processes while holding the mutex,
		 * otherwise the processes waken up immediately hit
		 * themselves into the mutex.
		 */
		mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
		want_wakeup = ffs_inode_hash_lock < 0;
		ffs_inode_hash_lock = 0;
		mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
		if (want_wakeup)
			wakeup(&ffs_inode_hash_lock);
		*vpp = NULL;
		FREE(ip, ump->um_malloctype);
		return (error);
	}
	bzero((caddr_t)ip, sizeof(struct inode));
	/*
	 * FFS supports lock sharing in the stack of vnodes
	 */
	vp->v_vnlock = &vp->v_lock;
	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);

	/*
	 * The inode is now discoverable through the hash, so the
	 * allocation lock can be dropped.  Do not wake up processes
	 * while holding the mutex, otherwise the processes woken up
	 * would immediately run into the mutex again.
	 */
	mtx_enter(&ffs_inode_hash_mtx, MTX_DEF);
	want_wakeup = ffs_inode_hash_lock < 0;
	ffs_inode_hash_lock = 0;
	mtx_exit(&ffs_inode_hash_mtx, MTX_DEF);
	if (want_wakeup)
		wakeup(&ffs_inode_hash_lock);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		brelse(bp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		ip->i_gen = random() / 2 + 1;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			ip->i_flag |= IN_MODIFIED;
	}
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via.
exflagsp and credanonp 1204 */ 1205int 1206ffs_fhtovp(mp, fhp, vpp) 1207 register struct mount *mp; 1208 struct fid *fhp; 1209 struct vnode **vpp; 1210{ 1211 register struct ufid *ufhp; 1212 struct fs *fs; 1213 1214 ufhp = (struct ufid *)fhp; 1215 fs = VFSTOUFS(mp)->um_fs; 1216 if (ufhp->ufid_ino < ROOTINO || 1217 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) 1218 return (ESTALE); 1219 return (ufs_fhtovp(mp, ufhp, vpp)); 1220} 1221 1222/* 1223 * Vnode pointer to File handle 1224 */ 1225/* ARGSUSED */ 1226int 1227ffs_vptofh(vp, fhp) 1228 struct vnode *vp; 1229 struct fid *fhp; 1230{ 1231 register struct inode *ip; 1232 register struct ufid *ufhp; 1233 1234 ip = VTOI(vp); 1235 ufhp = (struct ufid *)fhp; 1236 ufhp->ufid_len = sizeof(struct ufid); 1237 ufhp->ufid_ino = ip->i_number; 1238 ufhp->ufid_gen = ip->i_gen; 1239 return (0); 1240} 1241 1242/* 1243 * Initialize the filesystem; just use ufs_init. 1244 */ 1245static int 1246ffs_init(vfsp) 1247 struct vfsconf *vfsp; 1248{ 1249 1250 softdep_initialize(); 1251 mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF); 1252 return (ufs_init(vfsp)); 1253} 1254 1255/* 1256 * Write a superblock and associated information back to disk. 1257 */ 1258static int 1259ffs_sbupdate(mp, waitfor) 1260 struct ufsmount *mp; 1261 int waitfor; 1262{ 1263 register struct fs *dfs, *fs = mp->um_fs; 1264 register struct buf *bp; 1265 int blks; 1266 void *space; 1267 int i, size, error, allerror = 0; 1268 1269 /* 1270 * First write back the summary information. 
1271 */ 1272 blks = howmany(fs->fs_cssize, fs->fs_fsize); 1273 space = fs->fs_csp; 1274 for (i = 0; i < blks; i += fs->fs_frag) { 1275 size = fs->fs_bsize; 1276 if (i + fs->fs_frag > blks) 1277 size = (blks - i) * fs->fs_fsize; 1278 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), 1279 size, 0, 0); 1280 bcopy(space, bp->b_data, (u_int)size); 1281 space = (char *)space + size; 1282 if (waitfor != MNT_WAIT) 1283 bawrite(bp); 1284 else if ((error = bwrite(bp)) != 0) 1285 allerror = error; 1286 } 1287 /* 1288 * Now write back the superblock itself. If any errors occurred 1289 * up to this point, then fail so that the superblock avoids 1290 * being written out as clean. 1291 */ 1292 if (allerror) 1293 return (allerror); 1294 bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0); 1295 fs->fs_fmod = 0; 1296 fs->fs_time = time_second; 1297 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 1298 /* Restore compatibility to old file systems. XXX */ 1299 dfs = (struct fs *)bp->b_data; /* XXX */ 1300 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */ 1301 dfs->fs_nrpos = -1; /* XXX */ 1302 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ 1303 int32_t *lp, tmp; /* XXX */ 1304 /* XXX */ 1305 lp = (int32_t *)&dfs->fs_qbmask; /* XXX */ 1306 tmp = lp[4]; /* XXX */ 1307 for (i = 4; i > 0; i--) /* XXX */ 1308 lp[i] = lp[i-1]; /* XXX */ 1309 lp[0] = tmp; /* XXX */ 1310 } /* XXX */ 1311 dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */ 1312 if (waitfor != MNT_WAIT) 1313 bawrite(bp); 1314 else if ((error = bwrite(bp)) != 0) 1315 allerror = error; 1316 return (allerror); 1317} 1318