ffs_vfsops.c revision 301310
1/*- 2 * Copyright (c) 1989, 1991, 1993, 1994 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 30 */ 31 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_vfsops.c 301310 2016-06-04 10:16:58Z kib $"); 34 35#include "opt_quota.h" 36#include "opt_ufs.h" 37#include "opt_ffs.h" 38#include "opt_ddb.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/namei.h> 43#include <sys/priv.h> 44#include <sys/proc.h> 45#include <sys/taskqueue.h> 46#include <sys/kernel.h> 47#include <sys/vnode.h> 48#include <sys/mount.h> 49#include <sys/bio.h> 50#include <sys/buf.h> 51#include <sys/conf.h> 52#include <sys/fcntl.h> 53#include <sys/ioccom.h> 54#include <sys/malloc.h> 55#include <sys/mutex.h> 56#include <sys/rwlock.h> 57 58#include <security/mac/mac_framework.h> 59 60#include <ufs/ufs/extattr.h> 61#include <ufs/ufs/gjournal.h> 62#include <ufs/ufs/quota.h> 63#include <ufs/ufs/ufsmount.h> 64#include <ufs/ufs/inode.h> 65#include <ufs/ufs/ufs_extern.h> 66 67#include <ufs/ffs/fs.h> 68#include <ufs/ffs/ffs_extern.h> 69 70#include <vm/vm.h> 71#include <vm/uma.h> 72#include <vm/vm_page.h> 73 74#include <geom/geom.h> 75#include <geom/geom_vfs.h> 76 77#include <ddb/ddb.h> 78 79static uma_zone_t uma_inode, uma_ufs1, uma_ufs2; 80 81static int ffs_mountfs(struct vnode *, struct mount *, struct thread *); 82static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, 83 ufs2_daddr_t); 84static void ffs_ifree(struct ufsmount *ump, struct inode *ip); 85static int ffs_sync_lazy(struct mount *mp); 86 87static vfs_init_t ffs_init; 88static vfs_uninit_t ffs_uninit; 89static vfs_extattrctl_t ffs_extattrctl; 90static vfs_cmount_t ffs_cmount; 91static vfs_unmount_t ffs_unmount; 92static vfs_mount_t ffs_mount; 93static vfs_statfs_t ffs_statfs; 94static vfs_fhtovp_t ffs_fhtovp; 95static vfs_sync_t ffs_sync; 96 97static struct vfsops ufs_vfsops = { 98 .vfs_extattrctl = ffs_extattrctl, 99 .vfs_fhtovp = ffs_fhtovp, 100 .vfs_init = 
ffs_init, 101 .vfs_mount = ffs_mount, 102 .vfs_cmount = ffs_cmount, 103 .vfs_quotactl = ufs_quotactl, 104 .vfs_root = ufs_root, 105 .vfs_statfs = ffs_statfs, 106 .vfs_sync = ffs_sync, 107 .vfs_uninit = ffs_uninit, 108 .vfs_unmount = ffs_unmount, 109 .vfs_vget = ffs_vget, 110 .vfs_susp_clean = process_deferred_inactive, 111}; 112 113VFS_SET(ufs_vfsops, ufs, 0); 114MODULE_VERSION(ufs, 1); 115 116static b_strategy_t ffs_geom_strategy; 117static b_write_t ffs_bufwrite; 118 119static struct buf_ops ffs_ops = { 120 .bop_name = "FFS", 121 .bop_write = ffs_bufwrite, 122 .bop_strategy = ffs_geom_strategy, 123 .bop_sync = bufsync, 124#ifdef NO_FFS_SNAPSHOT 125 .bop_bdflush = bufbdflush, 126#else 127 .bop_bdflush = ffs_bdflush, 128#endif 129}; 130 131/* 132 * Note that userquota and groupquota options are not currently used 133 * by UFS/FFS code and generally mount(8) does not pass those options 134 * from userland, but they can be passed by loader(8) via 135 * vfs.root.mountfrom.options. 136 */ 137static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr", 138 "noclusterw", "noexec", "export", "force", "from", "groupquota", 139 "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir", 140 "nosymfollow", "sync", "union", "userquota", NULL }; 141 142static int 143ffs_mount(struct mount *mp) 144{ 145 struct vnode *devvp; 146 struct thread *td; 147 struct ufsmount *ump = NULL; 148 struct fs *fs; 149 pid_t fsckpid = 0; 150 int error, flags; 151 uint64_t mntorflags; 152 accmode_t accmode; 153 struct nameidata ndp; 154 char *fspec; 155 156 td = curthread; 157 if (vfs_filteropt(mp->mnt_optnew, ffs_opts)) 158 return (EINVAL); 159 if (uma_inode == NULL) { 160 uma_inode = uma_zcreate("FFS inode", 161 sizeof(struct inode), NULL, NULL, NULL, NULL, 162 UMA_ALIGN_PTR, 0); 163 uma_ufs1 = uma_zcreate("FFS1 dinode", 164 sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL, 165 UMA_ALIGN_PTR, 0); 166 uma_ufs2 = uma_zcreate("FFS2 dinode", 167 sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL, 168 UMA_ALIGN_PTR, 0); 169 } 170 171 vfs_deleteopt(mp->mnt_optnew, "groupquota"); 172 vfs_deleteopt(mp->mnt_optnew, "userquota"); 173 174 fspec = vfs_getopts(mp->mnt_optnew, "from", &error); 175 if (error) 176 return (error); 177 178 mntorflags = 0; 179 if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0) 180 mntorflags |= MNT_ACLS; 181 182 if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) { 183 mntorflags |= MNT_SNAPSHOT; 184 /* 185 * Once we have set the MNT_SNAPSHOT flag, do not 186 * persist "snapshot" in the options list. 187 */ 188 vfs_deleteopt(mp->mnt_optnew, "snapshot"); 189 vfs_deleteopt(mp->mnt_opt, "snapshot"); 190 } 191 192 if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 && 193 vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) { 194 /* 195 * Once we have set the restricted PID, do not 196 * persist "fsckpid" in the options list. 
197 */ 198 vfs_deleteopt(mp->mnt_optnew, "fsckpid"); 199 vfs_deleteopt(mp->mnt_opt, "fsckpid"); 200 if (mp->mnt_flag & MNT_UPDATE) { 201 if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 && 202 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { 203 vfs_mount_error(mp, 204 "Checker enable: Must be read-only"); 205 return (EINVAL); 206 } 207 } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) { 208 vfs_mount_error(mp, 209 "Checker enable: Must be read-only"); 210 return (EINVAL); 211 } 212 /* Set to -1 if we are done */ 213 if (fsckpid == 0) 214 fsckpid = -1; 215 } 216 217 if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) { 218 if (mntorflags & MNT_ACLS) { 219 vfs_mount_error(mp, 220 "\"acls\" and \"nfsv4acls\" options " 221 "are mutually exclusive"); 222 return (EINVAL); 223 } 224 mntorflags |= MNT_NFS4ACLS; 225 } 226 227 MNT_ILOCK(mp); 228 mp->mnt_flag |= mntorflags; 229 MNT_IUNLOCK(mp); 230 /* 231 * If updating, check whether changing from read-only to 232 * read/write; if there is no device name, that's all we do. 233 */ 234 if (mp->mnt_flag & MNT_UPDATE) { 235 ump = VFSTOUFS(mp); 236 fs = ump->um_fs; 237 devvp = ump->um_devvp; 238 if (fsckpid == -1 && ump->um_fsckpid > 0) { 239 if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 || 240 (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) 241 return (error); 242 DROP_GIANT(); 243 g_topology_lock(); 244 /* 245 * Return to normal read-only mode. 246 */ 247 error = g_access(ump->um_cp, 0, -1, 0); 248 g_topology_unlock(); 249 PICKUP_GIANT(); 250 ump->um_fsckpid = 0; 251 } 252 if (fs->fs_ronly == 0 && 253 vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 254 /* 255 * Flush any dirty data and suspend filesystem. 256 */ 257 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 258 return (error); 259 error = vfs_write_suspend_umnt(mp); 260 if (error != 0) 261 return (error); 262 /* 263 * Check for and optionally get rid of files open 264 * for writing. 265 */ 266 flags = WRITECLOSE; 267 if (mp->mnt_flag & MNT_FORCE) 268 flags |= FORCECLOSE; 269 if (MOUNTEDSOFTDEP(mp)) { 270 error = softdep_flushfiles(mp, flags, td); 271 } else { 272 error = ffs_flushfiles(mp, flags, td); 273 } 274 if (error) { 275 vfs_write_resume(mp, 0); 276 return (error); 277 } 278 if (fs->fs_pendingblocks != 0 || 279 fs->fs_pendinginodes != 0) { 280 printf("WARNING: %s Update error: blocks %jd " 281 "files %d\n", fs->fs_fsmnt, 282 (intmax_t)fs->fs_pendingblocks, 283 fs->fs_pendinginodes); 284 fs->fs_pendingblocks = 0; 285 fs->fs_pendinginodes = 0; 286 } 287 if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0) 288 fs->fs_clean = 1; 289 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 290 fs->fs_ronly = 0; 291 fs->fs_clean = 0; 292 vfs_write_resume(mp, 0); 293 return (error); 294 } 295 if (MOUNTEDSOFTDEP(mp)) 296 softdep_unmount(mp); 297 DROP_GIANT(); 298 g_topology_lock(); 299 /* 300 * Drop our write and exclusive access. 301 */ 302 g_access(ump->um_cp, 0, -1, -1); 303 g_topology_unlock(); 304 PICKUP_GIANT(); 305 fs->fs_ronly = 1; 306 MNT_ILOCK(mp); 307 mp->mnt_flag |= MNT_RDONLY; 308 MNT_IUNLOCK(mp); 309 /* 310 * Allow the writers to note that filesystem 311 * is ro now. 312 */ 313 vfs_write_resume(mp, 0); 314 } 315 if ((mp->mnt_flag & MNT_RELOAD) && 316 (error = ffs_reload(mp, td, 0)) != 0) 317 return (error); 318 if (fs->fs_ronly && 319 !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) { 320 /* 321 * If we are running a checker, do not allow upgrade. 
322 */ 323 if (ump->um_fsckpid > 0) { 324 vfs_mount_error(mp, 325 "Active checker, cannot upgrade to write"); 326 return (EINVAL); 327 } 328 /* 329 * If upgrade to read-write by non-root, then verify 330 * that user has necessary permissions on the device. 331 */ 332 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 333 error = VOP_ACCESS(devvp, VREAD | VWRITE, 334 td->td_ucred, td); 335 if (error) 336 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 337 if (error) { 338 VOP_UNLOCK(devvp, 0); 339 return (error); 340 } 341 VOP_UNLOCK(devvp, 0); 342 fs->fs_flags &= ~FS_UNCLEAN; 343 if (fs->fs_clean == 0) { 344 fs->fs_flags |= FS_UNCLEAN; 345 if ((mp->mnt_flag & MNT_FORCE) || 346 ((fs->fs_flags & 347 (FS_SUJ | FS_NEEDSFSCK)) == 0 && 348 (fs->fs_flags & FS_DOSOFTDEP))) { 349 printf("WARNING: %s was not properly " 350 "dismounted\n", fs->fs_fsmnt); 351 } else { 352 vfs_mount_error(mp, 353 "R/W mount of %s denied. %s.%s", 354 fs->fs_fsmnt, 355 "Filesystem is not clean - run fsck", 356 (fs->fs_flags & FS_SUJ) == 0 ? "" : 357 " Forced mount will invalidate" 358 " journal contents"); 359 return (EPERM); 360 } 361 } 362 DROP_GIANT(); 363 g_topology_lock(); 364 /* 365 * Request exclusive write access. 366 */ 367 error = g_access(ump->um_cp, 0, 1, 1); 368 g_topology_unlock(); 369 PICKUP_GIANT(); 370 if (error) 371 return (error); 372 if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0) 373 return (error); 374 fs->fs_ronly = 0; 375 MNT_ILOCK(mp); 376 mp->mnt_flag &= ~MNT_RDONLY; 377 MNT_IUNLOCK(mp); 378 fs->fs_mtime = time_second; 379 /* check to see if we need to start softdep */ 380 if ((fs->fs_flags & FS_DOSOFTDEP) && 381 (error = softdep_mount(devvp, mp, fs, td->td_ucred))){ 382 vn_finished_write(mp); 383 return (error); 384 } 385 fs->fs_clean = 0; 386 if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) { 387 vn_finished_write(mp); 388 return (error); 389 } 390 if (fs->fs_snapinum[0] != 0) 391 ffs_snapshot_mount(mp); 392 vn_finished_write(mp); 393 } 394 /* 395 * Soft updates is incompatible with "async", 396 * so if we are doing softupdates stop the user 397 * from setting the async flag in an update. 398 * Softdep_mount() clears it in an initial mount 399 * or ro->rw remount. 400 */ 401 if (MOUNTEDSOFTDEP(mp)) { 402 /* XXX: Reset too late ? */ 403 MNT_ILOCK(mp); 404 mp->mnt_flag &= ~MNT_ASYNC; 405 MNT_IUNLOCK(mp); 406 } 407 /* 408 * Keep MNT_ACLS flag if it is stored in superblock. 409 */ 410 if ((fs->fs_flags & FS_ACLS) != 0) { 411 /* XXX: Set too late ? */ 412 MNT_ILOCK(mp); 413 mp->mnt_flag |= MNT_ACLS; 414 MNT_IUNLOCK(mp); 415 } 416 417 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 418 /* XXX: Set too late ? */ 419 MNT_ILOCK(mp); 420 mp->mnt_flag |= MNT_NFS4ACLS; 421 MNT_IUNLOCK(mp); 422 } 423 /* 424 * If this is a request from fsck to clean up the filesystem, 425 * then allow the specified pid to proceed. 426 */ 427 if (fsckpid > 0) { 428 if (ump->um_fsckpid != 0) { 429 vfs_mount_error(mp, 430 "Active checker already running on %s", 431 fs->fs_fsmnt); 432 return (EINVAL); 433 } 434 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 435 ("soft updates enabled on read-only file system")); 436 DROP_GIANT(); 437 g_topology_lock(); 438 /* 439 * Request write access. 
440 */ 441 error = g_access(ump->um_cp, 0, 1, 0); 442 g_topology_unlock(); 443 PICKUP_GIANT(); 444 if (error) { 445 vfs_mount_error(mp, 446 "Checker activation failed on %s", 447 fs->fs_fsmnt); 448 return (error); 449 } 450 ump->um_fsckpid = fsckpid; 451 if (fs->fs_snapinum[0] != 0) 452 ffs_snapshot_mount(mp); 453 fs->fs_mtime = time_second; 454 fs->fs_fmod = 1; 455 fs->fs_clean = 0; 456 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 457 } 458 459 /* 460 * If this is a snapshot request, take the snapshot. 461 */ 462 if (mp->mnt_flag & MNT_SNAPSHOT) 463 return (ffs_snapshot(mp, fspec)); 464 } 465 466 /* 467 * Not an update, or updating the name: look up the name 468 * and verify that it refers to a sensible disk device. 469 */ 470 NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td); 471 if ((error = namei(&ndp)) != 0) 472 return (error); 473 NDFREE(&ndp, NDF_ONLY_PNBUF); 474 devvp = ndp.ni_vp; 475 if (!vn_isdisk(devvp, &error)) { 476 vput(devvp); 477 return (error); 478 } 479 480 /* 481 * If mount by non-root, then verify that user has necessary 482 * permissions on the device. 483 */ 484 accmode = VREAD; 485 if ((mp->mnt_flag & MNT_RDONLY) == 0) 486 accmode |= VWRITE; 487 error = VOP_ACCESS(devvp, accmode, td->td_ucred, td); 488 if (error) 489 error = priv_check(td, PRIV_VFS_MOUNT_PERM); 490 if (error) { 491 vput(devvp); 492 return (error); 493 } 494 495 if (mp->mnt_flag & MNT_UPDATE) { 496 /* 497 * Update only 498 * 499 * If it's not the same vnode, or at least the same device 500 * then it's not correct. 501 */ 502 503 if (devvp->v_rdev != ump->um_devvp->v_rdev) 504 error = EINVAL; /* needs translation */ 505 vput(devvp); 506 if (error) 507 return (error); 508 } else { 509 /* 510 * New mount 511 * 512 * We need the name for the mount point (also used for 513 * "last mounted on") copied in. If an error occurs, 514 * the mount point is discarded by the upper level code. 515 * Note that vfs_mount_alloc() populates f_mntonname for us. 516 */ 517 if ((error = ffs_mountfs(devvp, mp, td)) != 0) { 518 vrele(devvp); 519 return (error); 520 } 521 if (fsckpid > 0) { 522 KASSERT(MOUNTEDSOFTDEP(mp) == 0, 523 ("soft updates enabled on read-only file system")); 524 ump = VFSTOUFS(mp); 525 fs = ump->um_fs; 526 DROP_GIANT(); 527 g_topology_lock(); 528 /* 529 * Request write access. 530 */ 531 error = g_access(ump->um_cp, 0, 1, 0); 532 g_topology_unlock(); 533 PICKUP_GIANT(); 534 if (error) { 535 printf("WARNING: %s: Checker activation " 536 "failed\n", fs->fs_fsmnt); 537 } else { 538 ump->um_fsckpid = fsckpid; 539 if (fs->fs_snapinum[0] != 0) 540 ffs_snapshot_mount(mp); 541 fs->fs_mtime = time_second; 542 fs->fs_clean = 0; 543 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 544 } 545 } 546 } 547 vfs_mountedfrom(mp, fspec); 548 return (0); 549} 550 551/* 552 * Compatibility with old mount system call. 553 */ 554 555static int 556ffs_cmount(struct mntarg *ma, void *data, uint64_t flags) 557{ 558 struct ufs_args args; 559 struct export_args exp; 560 int error; 561 562 if (data == NULL) 563 return (EINVAL); 564 error = copyin(data, &args, sizeof args); 565 if (error) 566 return (error); 567 vfs_oexport_conv(&args.export, &exp); 568 569 ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN); 570 ma = mount_arg(ma, "export", &exp, sizeof(exp)); 571 error = kernel_mount(ma, flags); 572 573 return (error); 574} 575 576/* 577 * Reload all incore data for a filesystem (used after running fsck on 578 * the root filesystem and finding things to fix). 
If the 'force' flag 579 * is 0, the filesystem must be mounted read-only. 580 * 581 * Things to do to update the mount: 582 * 1) invalidate all cached meta-data. 583 * 2) re-read superblock from disk. 584 * 3) re-read summary information from disk. 585 * 4) invalidate all inactive vnodes. 586 * 5) invalidate all cached file data. 587 * 6) re-read inode data for all active vnodes. 588 */ 589int 590ffs_reload(struct mount *mp, struct thread *td, int force) 591{ 592 struct vnode *vp, *mvp, *devvp; 593 struct inode *ip; 594 void *space; 595 struct buf *bp; 596 struct fs *fs, *newfs; 597 struct ufsmount *ump; 598 ufs2_daddr_t sblockloc; 599 int i, blks, size, error; 600 int32_t *lp; 601 602 ump = VFSTOUFS(mp); 603 604 MNT_ILOCK(mp); 605 if ((mp->mnt_flag & MNT_RDONLY) == 0 && force == 0) { 606 MNT_IUNLOCK(mp); 607 return (EINVAL); 608 } 609 MNT_IUNLOCK(mp); 610 611 /* 612 * Step 1: invalidate all cached meta-data. 613 */ 614 devvp = VFSTOUFS(mp)->um_devvp; 615 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 616 if (vinvalbuf(devvp, 0, 0, 0) != 0) 617 panic("ffs_reload: dirty1"); 618 VOP_UNLOCK(devvp, 0); 619 620 /* 621 * Step 2: re-read superblock from disk. 622 */ 623 fs = VFSTOUFS(mp)->um_fs; 624 if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize, 625 NOCRED, &bp)) != 0) 626 return (error); 627 newfs = (struct fs *)bp->b_data; 628 if ((newfs->fs_magic != FS_UFS1_MAGIC && 629 newfs->fs_magic != FS_UFS2_MAGIC) || 630 newfs->fs_bsize > MAXBSIZE || 631 newfs->fs_bsize < sizeof(struct fs)) { 632 brelse(bp); 633 return (EIO); /* XXX needs translation */ 634 } 635 /* 636 * Copy pointer fields back into superblock before copying in XXX 637 * new superblock. These should really be in the ufsmount. XXX 638 * Note that important parameters (eg fs_ncg) are unchanged. 639 */ 640 newfs->fs_csp = fs->fs_csp; 641 newfs->fs_maxcluster = fs->fs_maxcluster; 642 newfs->fs_contigdirs = fs->fs_contigdirs; 643 newfs->fs_active = fs->fs_active; 644 newfs->fs_ronly = fs->fs_ronly; 645 sblockloc = fs->fs_sblockloc; 646 bcopy(newfs, fs, (u_int)fs->fs_sbsize); 647 brelse(bp); 648 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 649 ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc); 650 UFS_LOCK(ump); 651 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 652 printf("WARNING: %s: reload pending error: blocks %jd " 653 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 654 fs->fs_pendinginodes); 655 fs->fs_pendingblocks = 0; 656 fs->fs_pendinginodes = 0; 657 } 658 UFS_UNLOCK(ump); 659 660 /* 661 * Step 3: re-read summary information from disk. 662 */ 663 size = fs->fs_cssize; 664 blks = howmany(size, fs->fs_fsize); 665 if (fs->fs_contigsumsize > 0) 666 size += fs->fs_ncg * sizeof(int32_t); 667 size += fs->fs_ncg * sizeof(u_int8_t); 668 free(fs->fs_csp, M_UFSMNT); 669 space = malloc((u_long)size, M_UFSMNT, M_WAITOK); 670 fs->fs_csp = space; 671 for (i = 0; i < blks; i += fs->fs_frag) { 672 size = fs->fs_bsize; 673 if (i + fs->fs_frag > blks) 674 size = (blks - i) * fs->fs_fsize; 675 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 676 NOCRED, &bp); 677 if (error) 678 return (error); 679 bcopy(bp->b_data, space, (u_int)size); 680 space = (char *)space + size; 681 brelse(bp); 682 } 683 /* 684 * We no longer know anything about clusters per cylinder group. 
685 */ 686 if (fs->fs_contigsumsize > 0) { 687 fs->fs_maxcluster = lp = space; 688 for (i = 0; i < fs->fs_ncg; i++) 689 *lp++ = fs->fs_contigsumsize; 690 space = lp; 691 } 692 size = fs->fs_ncg * sizeof(u_int8_t); 693 fs->fs_contigdirs = (u_int8_t *)space; 694 bzero(fs->fs_contigdirs, size); 695 696loop: 697 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 698 /* 699 * Skip syncer vnode. 700 */ 701 if (vp->v_type == VNON) { 702 VI_UNLOCK(vp); 703 continue; 704 } 705 /* 706 * Step 4: invalidate all cached file data. 707 */ 708 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) { 709 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 710 goto loop; 711 } 712 if (vinvalbuf(vp, 0, 0, 0)) 713 panic("ffs_reload: dirty2"); 714 /* 715 * Step 5: re-read inode data for all active vnodes. 716 */ 717 ip = VTOI(vp); 718 error = 719 bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 720 (int)fs->fs_bsize, NOCRED, &bp); 721 if (error) { 722 VOP_UNLOCK(vp, 0); 723 vrele(vp); 724 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 725 return (error); 726 } 727 ffs_load_inode(bp, ip, fs, ip->i_number); 728 ip->i_effnlink = ip->i_nlink; 729 brelse(bp); 730 VOP_UNLOCK(vp, 0); 731 vrele(vp); 732 } 733 return (0); 734} 735 736/* 737 * Possible superblock locations ordered from most to least likely. 738 */ 739static int sblock_try[] = SBLOCKSEARCH; 740 741/* 742 * Common code for mount and mountroot 743 */ 744static int 745ffs_mountfs(devvp, mp, td) 746 struct vnode *devvp; 747 struct mount *mp; 748 struct thread *td; 749{ 750 struct ufsmount *ump; 751 struct buf *bp; 752 struct fs *fs; 753 struct cdev *dev; 754 void *space; 755 ufs2_daddr_t sblockloc; 756 int error, i, blks, size, ronly; 757 int32_t *lp; 758 struct ucred *cred; 759 struct g_consumer *cp; 760 struct mount *nmp; 761 762 bp = NULL; 763 ump = NULL; 764 cred = td ? td->td_ucred : NOCRED; 765 ronly = (mp->mnt_flag & MNT_RDONLY) != 0; 766 767 KASSERT(devvp->v_type == VCHR, ("reclaimed devvp")); 768 dev = devvp->v_rdev; 769 if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0, 770 (uintptr_t)mp) == 0) { 771 VOP_UNLOCK(devvp, 0); 772 return (EBUSY); 773 } 774 DROP_GIANT(); 775 g_topology_lock(); 776 error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1); 777 g_topology_unlock(); 778 PICKUP_GIANT(); 779 if (error != 0) { 780 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 781 VOP_UNLOCK(devvp, 0); 782 return (error); 783 } 784 dev_ref(dev); 785 devvp->v_bufobj.bo_ops = &ffs_ops; 786 VOP_UNLOCK(devvp, 0); 787 if (dev->si_iosize_max != 0) 788 mp->mnt_iosize_max = dev->si_iosize_max; 789 if (mp->mnt_iosize_max > MAXPHYS) 790 mp->mnt_iosize_max = MAXPHYS; 791 792 fs = NULL; 793 sblockloc = 0; 794 /* 795 * Try reading the superblock in each of its possible locations. 
796 */ 797 for (i = 0; sblock_try[i] != -1; i++) { 798 if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) { 799 error = EINVAL; 800 vfs_mount_error(mp, 801 "Invalid sectorsize %d for superblock size %d", 802 cp->provider->sectorsize, SBLOCKSIZE); 803 goto out; 804 } 805 if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE, 806 cred, &bp)) != 0) 807 goto out; 808 fs = (struct fs *)bp->b_data; 809 sblockloc = sblock_try[i]; 810 if ((fs->fs_magic == FS_UFS1_MAGIC || 811 (fs->fs_magic == FS_UFS2_MAGIC && 812 (fs->fs_sblockloc == sblockloc || 813 (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) && 814 fs->fs_bsize <= MAXBSIZE && 815 fs->fs_bsize >= sizeof(struct fs)) 816 break; 817 brelse(bp); 818 bp = NULL; 819 } 820 if (sblock_try[i] == -1) { 821 error = EINVAL; /* XXX needs translation */ 822 goto out; 823 } 824 fs->fs_fmod = 0; 825 fs->fs_flags &= ~FS_INDEXDIRS; /* no support for directory indicies */ 826 fs->fs_flags &= ~FS_UNCLEAN; 827 if (fs->fs_clean == 0) { 828 fs->fs_flags |= FS_UNCLEAN; 829 if (ronly || (mp->mnt_flag & MNT_FORCE) || 830 ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 && 831 (fs->fs_flags & FS_DOSOFTDEP))) { 832 printf("WARNING: %s was not properly dismounted\n", 833 fs->fs_fsmnt); 834 } else { 835 vfs_mount_error(mp, "R/W mount of %s denied. %s%s", 836 fs->fs_fsmnt, "Filesystem is not clean - run fsck.", 837 (fs->fs_flags & FS_SUJ) == 0 ? "" : 838 " Forced mount will invalidate journal contents"); 839 error = EPERM; 840 goto out; 841 } 842 if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) && 843 (mp->mnt_flag & MNT_FORCE)) { 844 printf("WARNING: %s: lost blocks %jd files %d\n", 845 fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 846 fs->fs_pendinginodes); 847 fs->fs_pendingblocks = 0; 848 fs->fs_pendinginodes = 0; 849 } 850 } 851 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 852 printf("WARNING: %s: mount pending error: blocks %jd " 853 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 854 fs->fs_pendinginodes); 855 fs->fs_pendingblocks = 0; 856 fs->fs_pendinginodes = 0; 857 } 858 if ((fs->fs_flags & FS_GJOURNAL) != 0) { 859#ifdef UFS_GJOURNAL 860 /* 861 * Get journal provider name. 
862 */ 863 size = 1024; 864 mp->mnt_gjprovider = malloc(size, M_UFSMNT, M_WAITOK); 865 if (g_io_getattr("GJOURNAL::provider", cp, &size, 866 mp->mnt_gjprovider) == 0) { 867 mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, size, 868 M_UFSMNT, M_WAITOK); 869 MNT_ILOCK(mp); 870 mp->mnt_flag |= MNT_GJOURNAL; 871 MNT_IUNLOCK(mp); 872 } else { 873 printf("WARNING: %s: GJOURNAL flag on fs " 874 "but no gjournal provider below\n", 875 mp->mnt_stat.f_mntonname); 876 free(mp->mnt_gjprovider, M_UFSMNT); 877 mp->mnt_gjprovider = NULL; 878 } 879#else 880 printf("WARNING: %s: GJOURNAL flag on fs but no " 881 "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname); 882#endif 883 } else { 884 mp->mnt_gjprovider = NULL; 885 } 886 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO); 887 ump->um_cp = cp; 888 ump->um_bo = &devvp->v_bufobj; 889 ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK); 890 if (fs->fs_magic == FS_UFS1_MAGIC) { 891 ump->um_fstype = UFS1; 892 ump->um_balloc = ffs_balloc_ufs1; 893 } else { 894 ump->um_fstype = UFS2; 895 ump->um_balloc = ffs_balloc_ufs2; 896 } 897 ump->um_blkatoff = ffs_blkatoff; 898 ump->um_truncate = ffs_truncate; 899 ump->um_update = ffs_update; 900 ump->um_valloc = ffs_valloc; 901 ump->um_vfree = ffs_vfree; 902 ump->um_ifree = ffs_ifree; 903 ump->um_rdonly = ffs_rdonly; 904 ump->um_snapgone = ffs_snapgone; 905 mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF); 906 bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize); 907 if (fs->fs_sbsize < SBLOCKSIZE) 908 bp->b_flags |= B_INVAL | B_NOCACHE; 909 brelse(bp); 910 bp = NULL; 911 fs = ump->um_fs; 912 ffs_oldfscompat_read(fs, ump, sblockloc); 913 fs->fs_ronly = ronly; 914 size = fs->fs_cssize; 915 blks = howmany(size, fs->fs_fsize); 916 if (fs->fs_contigsumsize > 0) 917 size += fs->fs_ncg * sizeof(int32_t); 918 size += fs->fs_ncg * sizeof(u_int8_t); 919 space = malloc((u_long)size, M_UFSMNT, M_WAITOK); 920 fs->fs_csp = space; 921 for (i = 0; i < blks; i += fs->fs_frag) { 922 size = fs->fs_bsize; 923 if (i + fs->fs_frag > blks) 924 size = (blks - i) * fs->fs_fsize; 925 if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, 926 cred, &bp)) != 0) { 927 free(fs->fs_csp, M_UFSMNT); 928 goto out; 929 } 930 bcopy(bp->b_data, space, (u_int)size); 931 space = (char *)space + size; 932 brelse(bp); 933 bp = NULL; 934 } 935 if (fs->fs_contigsumsize > 0) { 936 fs->fs_maxcluster = lp = space; 937 for (i = 0; i < fs->fs_ncg; i++) 938 *lp++ = fs->fs_contigsumsize; 939 space = lp; 940 } 941 size = fs->fs_ncg * sizeof(u_int8_t); 942 fs->fs_contigdirs = (u_int8_t *)space; 943 bzero(fs->fs_contigdirs, size); 944 fs->fs_active = NULL; 945 mp->mnt_data = ump; 946 mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0]; 947 mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1]; 948 nmp = NULL; 949 if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 || 950 (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) { 951 if (nmp) 952 vfs_rel(nmp); 953 vfs_getnewfsid(mp); 954 } 955 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; 956 MNT_ILOCK(mp); 957 mp->mnt_flag |= MNT_LOCAL; 958 MNT_IUNLOCK(mp); 959 if ((fs->fs_flags & FS_MULTILABEL) != 0) { 960#ifdef MAC 961 MNT_ILOCK(mp); 962 mp->mnt_flag |= MNT_MULTILABEL; 963 MNT_IUNLOCK(mp); 964#else 965 printf("WARNING: %s: multilabel flag on fs but " 966 "no MAC support\n", mp->mnt_stat.f_mntonname); 967#endif 968 } 969 if ((fs->fs_flags & FS_ACLS) != 0) { 970#ifdef UFS_ACL 971 MNT_ILOCK(mp); 972 973 if (mp->mnt_flag & MNT_NFS4ACLS) 974 printf("WARNING: %s: ACLs flag on fs conflicts with " 975 "\"nfsv4acls\" mount option; option 
ignored\n", 976 mp->mnt_stat.f_mntonname); 977 mp->mnt_flag &= ~MNT_NFS4ACLS; 978 mp->mnt_flag |= MNT_ACLS; 979 980 MNT_IUNLOCK(mp); 981#else 982 printf("WARNING: %s: ACLs flag on fs but no ACLs support\n", 983 mp->mnt_stat.f_mntonname); 984#endif 985 } 986 if ((fs->fs_flags & FS_NFS4ACLS) != 0) { 987#ifdef UFS_ACL 988 MNT_ILOCK(mp); 989 990 if (mp->mnt_flag & MNT_ACLS) 991 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts " 992 "with \"acls\" mount option; option ignored\n", 993 mp->mnt_stat.f_mntonname); 994 mp->mnt_flag &= ~MNT_ACLS; 995 mp->mnt_flag |= MNT_NFS4ACLS; 996 997 MNT_IUNLOCK(mp); 998#else 999 printf("WARNING: %s: NFSv4 ACLs flag on fs but no " 1000 "ACLs support\n", mp->mnt_stat.f_mntonname); 1001#endif 1002 } 1003 if ((fs->fs_flags & FS_TRIM) != 0) { 1004 size = sizeof(int); 1005 if (g_io_getattr("GEOM::candelete", cp, &size, 1006 &ump->um_candelete) == 0) { 1007 if (!ump->um_candelete) 1008 printf("WARNING: %s: TRIM flag on fs but disk " 1009 "does not support TRIM\n", 1010 mp->mnt_stat.f_mntonname); 1011 } else { 1012 printf("WARNING: %s: TRIM flag on fs but disk does " 1013 "not confirm that it supports TRIM\n", 1014 mp->mnt_stat.f_mntonname); 1015 ump->um_candelete = 0; 1016 } 1017 if (ump->um_candelete) { 1018 ump->um_trim_tq = taskqueue_create("trim", M_WAITOK, 1019 taskqueue_thread_enqueue, &ump->um_trim_tq); 1020 taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS, 1021 "%s trim", mp->mnt_stat.f_mntonname); 1022 } 1023 } 1024 1025 ump->um_mountp = mp; 1026 ump->um_dev = dev; 1027 ump->um_devvp = devvp; 1028 ump->um_nindir = fs->fs_nindir; 1029 ump->um_bptrtodb = fs->fs_fsbtodb; 1030 ump->um_seqinc = fs->fs_frag; 1031 for (i = 0; i < MAXQUOTAS; i++) 1032 ump->um_quotas[i] = NULLVP; 1033#ifdef UFS_EXTATTR 1034 ufs_extattr_uepm_init(&ump->um_extattr); 1035#endif 1036 /* 1037 * Set FS local "last mounted on" information (NULL pad) 1038 */ 1039 bzero(fs->fs_fsmnt, MAXMNTLEN); 1040 strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN); 1041 mp->mnt_stat.f_iosize = fs->fs_bsize; 1042 1043 if (mp->mnt_flag & MNT_ROOTFS) { 1044 /* 1045 * Root mount; update timestamp in mount structure. 1046 * this will be used by the common root mount code 1047 * to update the system clock. 1048 */ 1049 mp->mnt_time = fs->fs_time; 1050 } 1051 1052 if (ronly == 0) { 1053 fs->fs_mtime = time_second; 1054 if ((fs->fs_flags & FS_DOSOFTDEP) && 1055 (error = softdep_mount(devvp, mp, fs, cred)) != 0) { 1056 free(fs->fs_csp, M_UFSMNT); 1057 ffs_flushfiles(mp, FORCECLOSE, td); 1058 goto out; 1059 } 1060 if (fs->fs_snapinum[0] != 0) 1061 ffs_snapshot_mount(mp); 1062 fs->fs_fmod = 1; 1063 fs->fs_clean = 0; 1064 (void) ffs_sbupdate(ump, MNT_WAIT, 0); 1065 } 1066 /* 1067 * Initialize filesystem state information in mount struct. 1068 */ 1069 MNT_ILOCK(mp); 1070 mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED | 1071 MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE; 1072 MNT_IUNLOCK(mp); 1073#ifdef UFS_EXTATTR 1074#ifdef UFS_EXTATTR_AUTOSTART 1075 /* 1076 * 1077 * Auto-starting does the following: 1078 * - check for /.attribute in the fs, and extattr_start if so 1079 * - for each file in .attribute, enable that file with 1080 * an attribute of the same name. 1081 * Not clear how to report errors -- probably eat them. 1082 * This would all happen while the filesystem was busy/not 1083 * available, so would effectively be "atomic". 
1084 */ 1085 (void) ufs_extattr_autostart(mp, td); 1086#endif /* !UFS_EXTATTR_AUTOSTART */ 1087#endif /* !UFS_EXTATTR */ 1088 return (0); 1089out: 1090 if (bp) 1091 brelse(bp); 1092 if (cp != NULL) { 1093 DROP_GIANT(); 1094 g_topology_lock(); 1095 g_vfs_close(cp); 1096 g_topology_unlock(); 1097 PICKUP_GIANT(); 1098 } 1099 if (ump) { 1100 mtx_destroy(UFS_MTX(ump)); 1101 if (mp->mnt_gjprovider != NULL) { 1102 free(mp->mnt_gjprovider, M_UFSMNT); 1103 mp->mnt_gjprovider = NULL; 1104 } 1105 free(ump->um_fs, M_UFSMNT); 1106 free(ump, M_UFSMNT); 1107 mp->mnt_data = NULL; 1108 } 1109 atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0); 1110 dev_rel(dev); 1111 return (error); 1112} 1113 1114#include <sys/sysctl.h> 1115static int bigcgs = 0; 1116SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, ""); 1117 1118/* 1119 * Sanity checks for loading old filesystem superblocks. 1120 * See ffs_oldfscompat_write below for unwound actions. 1121 * 1122 * XXX - Parts get retired eventually. 1123 * Unfortunately new bits get added. 1124 */ 1125static void 1126ffs_oldfscompat_read(fs, ump, sblockloc) 1127 struct fs *fs; 1128 struct ufsmount *ump; 1129 ufs2_daddr_t sblockloc; 1130{ 1131 off_t maxfilesize; 1132 1133 /* 1134 * If not yet done, update fs_flags location and value of fs_sblockloc. 1135 */ 1136 if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) { 1137 fs->fs_flags = fs->fs_old_flags; 1138 fs->fs_old_flags |= FS_FLAGS_UPDATED; 1139 fs->fs_sblockloc = sblockloc; 1140 } 1141 /* 1142 * If not yet done, update UFS1 superblock with new wider fields. 1143 */ 1144 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) { 1145 fs->fs_maxbsize = fs->fs_bsize; 1146 fs->fs_time = fs->fs_old_time; 1147 fs->fs_size = fs->fs_old_size; 1148 fs->fs_dsize = fs->fs_old_dsize; 1149 fs->fs_csaddr = fs->fs_old_csaddr; 1150 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir; 1151 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree; 1152 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree; 1153 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree; 1154 } 1155 if (fs->fs_magic == FS_UFS1_MAGIC && 1156 fs->fs_old_inodefmt < FS_44INODEFMT) { 1157 fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1; 1158 fs->fs_qbmask = ~fs->fs_bmask; 1159 fs->fs_qfmask = ~fs->fs_fmask; 1160 } 1161 if (fs->fs_magic == FS_UFS1_MAGIC) { 1162 ump->um_savedmaxfilesize = fs->fs_maxfilesize; 1163 maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1; 1164 if (fs->fs_maxfilesize > maxfilesize) 1165 fs->fs_maxfilesize = maxfilesize; 1166 } 1167 /* Compatibility for old filesystems */ 1168 if (fs->fs_avgfilesize <= 0) 1169 fs->fs_avgfilesize = AVFILESIZ; 1170 if (fs->fs_avgfpdir <= 0) 1171 fs->fs_avgfpdir = AFPDIR; 1172 if (bigcgs) { 1173 fs->fs_save_cgsize = fs->fs_cgsize; 1174 fs->fs_cgsize = fs->fs_bsize; 1175 } 1176} 1177 1178/* 1179 * Unwinding superblock updates for old filesystems. 1180 * See ffs_oldfscompat_read above for details. 1181 * 1182 * XXX - Parts get retired eventually. 1183 * Unfortunately new bits get added. 1184 */ 1185void 1186ffs_oldfscompat_write(fs, ump) 1187 struct fs *fs; 1188 struct ufsmount *ump; 1189{ 1190 1191 /* 1192 * Copy back UFS2 updated fields that UFS1 inspects. 
1193 */ 1194 if (fs->fs_magic == FS_UFS1_MAGIC) { 1195 fs->fs_old_time = fs->fs_time; 1196 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir; 1197 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree; 1198 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree; 1199 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree; 1200 fs->fs_maxfilesize = ump->um_savedmaxfilesize; 1201 } 1202 if (bigcgs) { 1203 fs->fs_cgsize = fs->fs_save_cgsize; 1204 fs->fs_save_cgsize = 0; 1205 } 1206} 1207 1208/* 1209 * unmount system call 1210 */ 1211static int 1212ffs_unmount(mp, mntflags) 1213 struct mount *mp; 1214 int mntflags; 1215{ 1216 struct thread *td; 1217 struct ufsmount *ump = VFSTOUFS(mp); 1218 struct fs *fs; 1219 int error, flags, susp; 1220#ifdef UFS_EXTATTR 1221 int e_restart; 1222#endif 1223 1224 flags = 0; 1225 td = curthread; 1226 fs = ump->um_fs; 1227 susp = 0; 1228 if (mntflags & MNT_FORCE) { 1229 flags |= FORCECLOSE; 1230 susp = fs->fs_ronly == 0; 1231 } 1232#ifdef UFS_EXTATTR 1233 if ((error = ufs_extattr_stop(mp, td))) { 1234 if (error != EOPNOTSUPP) 1235 printf("WARNING: unmount %s: ufs_extattr_stop " 1236 "returned errno %d\n", mp->mnt_stat.f_mntonname, 1237 error); 1238 e_restart = 0; 1239 } else { 1240 ufs_extattr_uepm_destroy(&ump->um_extattr); 1241 e_restart = 1; 1242 } 1243#endif 1244 if (susp) { 1245 error = vfs_write_suspend_umnt(mp); 1246 if (error != 0) 1247 goto fail1; 1248 } 1249 if (MOUNTEDSOFTDEP(mp)) 1250 error = softdep_flushfiles(mp, flags, td); 1251 else 1252 error = ffs_flushfiles(mp, flags, td); 1253 if (error != 0 && error != ENXIO) 1254 goto fail; 1255 1256 UFS_LOCK(ump); 1257 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) { 1258 printf("WARNING: unmount %s: pending error: blocks %jd " 1259 "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks, 1260 fs->fs_pendinginodes); 1261 fs->fs_pendingblocks = 0; 1262 fs->fs_pendinginodes = 0; 1263 } 1264 UFS_UNLOCK(ump); 1265 if (MOUNTEDSOFTDEP(mp)) 1266 softdep_unmount(mp); 1267 if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) { 1268 fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1; 1269 error = ffs_sbupdate(ump, MNT_WAIT, 0); 1270 if (error && error != ENXIO) { 1271 fs->fs_clean = 0; 1272 goto fail; 1273 } 1274 } 1275 if (susp) 1276 vfs_write_resume(mp, VR_START_WRITE); 1277 if (ump->um_trim_tq != NULL) { 1278 while (ump->um_trim_inflight != 0) 1279 pause("ufsutr", hz); 1280 taskqueue_drain_all(ump->um_trim_tq); 1281 taskqueue_free(ump->um_trim_tq); 1282 } 1283 DROP_GIANT(); 1284 g_topology_lock(); 1285 if (ump->um_fsckpid > 0) { 1286 /* 1287 * Return to normal read-only mode. 
1288 */ 1289 error = g_access(ump->um_cp, 0, -1, 0); 1290 ump->um_fsckpid = 0; 1291 } 1292 g_vfs_close(ump->um_cp); 1293 g_topology_unlock(); 1294 PICKUP_GIANT(); 1295 atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0); 1296 vrele(ump->um_devvp); 1297 dev_rel(ump->um_dev); 1298 mtx_destroy(UFS_MTX(ump)); 1299 if (mp->mnt_gjprovider != NULL) { 1300 free(mp->mnt_gjprovider, M_UFSMNT); 1301 mp->mnt_gjprovider = NULL; 1302 } 1303 free(fs->fs_csp, M_UFSMNT); 1304 free(fs, M_UFSMNT); 1305 free(ump, M_UFSMNT); 1306 mp->mnt_data = NULL; 1307 MNT_ILOCK(mp); 1308 mp->mnt_flag &= ~MNT_LOCAL; 1309 MNT_IUNLOCK(mp); 1310 return (error); 1311 1312fail: 1313 if (susp) 1314 vfs_write_resume(mp, VR_START_WRITE); 1315fail1: 1316#ifdef UFS_EXTATTR 1317 if (e_restart) { 1318 ufs_extattr_uepm_init(&ump->um_extattr); 1319#ifdef UFS_EXTATTR_AUTOSTART 1320 (void) ufs_extattr_autostart(mp, td); 1321#endif 1322 } 1323#endif 1324 1325 return (error); 1326} 1327 1328/* 1329 * Flush out all the files in a filesystem. 1330 */ 1331int 1332ffs_flushfiles(mp, flags, td) 1333 struct mount *mp; 1334 int flags; 1335 struct thread *td; 1336{ 1337 struct ufsmount *ump; 1338 int qerror, error; 1339 1340 ump = VFSTOUFS(mp); 1341 qerror = 0; 1342#ifdef QUOTA 1343 if (mp->mnt_flag & MNT_QUOTA) { 1344 int i; 1345 error = vflush(mp, 0, SKIPSYSTEM|flags, td); 1346 if (error) 1347 return (error); 1348 for (i = 0; i < MAXQUOTAS; i++) { 1349 error = quotaoff(td, mp, i); 1350 if (error != 0) { 1351 if ((flags & EARLYFLUSH) == 0) 1352 return (error); 1353 else 1354 qerror = error; 1355 } 1356 } 1357 1358 /* 1359 * Here we fall through to vflush again to ensure that 1360 * we have gotten rid of all the system vnodes, unless 1361 * quotas must not be closed. 1362 */ 1363 } 1364#endif 1365 ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles"); 1366 if (ump->um_devvp->v_vflag & VV_COPYONWRITE) { 1367 if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0) 1368 return (error); 1369 ffs_snapshot_unmount(mp); 1370 flags |= FORCECLOSE; 1371 /* 1372 * Here we fall through to vflush again to ensure 1373 * that we have gotten rid of all the system vnodes. 1374 */ 1375 } 1376 1377 /* 1378 * Do not close system files if quotas were not closed, to be 1379 * able to sync the remaining dquots. The freeblks softupdate 1380 * workitems might hold a reference on a dquot, preventing 1381 * quotaoff() from completing. Next round of 1382 * softdep_flushworklist() iteration should process the 1383 * blockers, allowing the next run of quotaoff() to finally 1384 * flush held dquots. 1385 * 1386 * Otherwise, flush all the files. 1387 */ 1388 if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0) 1389 return (error); 1390 1391 /* 1392 * Flush filesystem metadata. 1393 */ 1394 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY); 1395 error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td); 1396 VOP_UNLOCK(ump->um_devvp, 0); 1397 return (error); 1398} 1399 1400/* 1401 * Get filesystem statistics. 
1402 */ 1403static int 1404ffs_statfs(mp, sbp) 1405 struct mount *mp; 1406 struct statfs *sbp; 1407{ 1408 struct ufsmount *ump; 1409 struct fs *fs; 1410 1411 ump = VFSTOUFS(mp); 1412 fs = ump->um_fs; 1413 if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC) 1414 panic("ffs_statfs"); 1415 sbp->f_version = STATFS_VERSION; 1416 sbp->f_bsize = fs->fs_fsize; 1417 sbp->f_iosize = fs->fs_bsize; 1418 sbp->f_blocks = fs->fs_dsize; 1419 UFS_LOCK(ump); 1420 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + 1421 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks); 1422 sbp->f_bavail = freespace(fs, fs->fs_minfree) + 1423 dbtofsb(fs, fs->fs_pendingblocks); 1424 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO; 1425 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes; 1426 UFS_UNLOCK(ump); 1427 sbp->f_namemax = NAME_MAX; 1428 return (0); 1429} 1430 1431static bool 1432sync_doupdate(struct inode *ip) 1433{ 1434 1435 return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | 1436 IN_UPDATE)) != 0); 1437} 1438 1439/* 1440 * For a lazy sync, we only care about access times, quotas and the 1441 * superblock. Other filesystem changes are already converted to 1442 * cylinder group blocks or inode blocks updates and are written to 1443 * disk by syncer. 1444 */ 1445static int 1446ffs_sync_lazy(mp) 1447 struct mount *mp; 1448{ 1449 struct vnode *mvp, *vp; 1450 struct inode *ip; 1451 struct thread *td; 1452 int allerror, error; 1453 1454 allerror = 0; 1455 td = curthread; 1456 if ((mp->mnt_flag & MNT_NOATIME) != 0) 1457 goto qupdate; 1458 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 1459 if (vp->v_type == VNON) { 1460 VI_UNLOCK(vp); 1461 continue; 1462 } 1463 ip = VTOI(vp); 1464 1465 /* 1466 * The IN_ACCESS flag is converted to IN_MODIFIED by 1467 * ufs_close() and ufs_getattr() by the calls to 1468 * ufs_itimes_locked(), without subsequent UFS_UPDATE(). 1469 * Test also all the other timestamp flags too, to pick up 1470 * any other cases that could be missed. 1471 */ 1472 if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) { 1473 VI_UNLOCK(vp); 1474 continue; 1475 } 1476 if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, 1477 td)) != 0) 1478 continue; 1479 if (sync_doupdate(ip)) 1480 error = ffs_update(vp, 0); 1481 if (error != 0) 1482 allerror = error; 1483 vput(vp); 1484 } 1485 1486qupdate: 1487#ifdef QUOTA 1488 qsync(mp); 1489#endif 1490 1491 if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 && 1492 (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0) 1493 allerror = error; 1494 return (allerror); 1495} 1496 1497/* 1498 * Go through the disk queues to initiate sandbagged IO; 1499 * go through the inodes to write those that have been modified; 1500 * initiate the writing of the super block if it has been modified. 1501 * 1502 * Note: we are always called with the filesystem marked busy using 1503 * vfs_busy(). 
1504 */ 1505static int 1506ffs_sync(mp, waitfor) 1507 struct mount *mp; 1508 int waitfor; 1509{ 1510 struct vnode *mvp, *vp, *devvp; 1511 struct thread *td; 1512 struct inode *ip; 1513 struct ufsmount *ump = VFSTOUFS(mp); 1514 struct fs *fs; 1515 int error, count, lockreq, allerror = 0; 1516 int suspend; 1517 int suspended; 1518 int secondary_writes; 1519 int secondary_accwrites; 1520 int softdep_deps; 1521 int softdep_accdeps; 1522 struct bufobj *bo; 1523 1524 suspend = 0; 1525 suspended = 0; 1526 td = curthread; 1527 fs = ump->um_fs; 1528 if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0) 1529 panic("%s: ffs_sync: modification on read-only filesystem", 1530 fs->fs_fsmnt); 1531 if (waitfor == MNT_LAZY) { 1532 if (!rebooting) 1533 return (ffs_sync_lazy(mp)); 1534 waitfor = MNT_NOWAIT; 1535 } 1536 1537 /* 1538 * Write back each (modified) inode. 1539 */ 1540 lockreq = LK_EXCLUSIVE | LK_NOWAIT; 1541 if (waitfor == MNT_SUSPEND) { 1542 suspend = 1; 1543 waitfor = MNT_WAIT; 1544 } 1545 if (waitfor == MNT_WAIT) 1546 lockreq = LK_EXCLUSIVE; 1547 lockreq |= LK_INTERLOCK | LK_SLEEPFAIL; 1548loop: 1549 /* Grab snapshot of secondary write counts */ 1550 MNT_ILOCK(mp); 1551 secondary_writes = mp->mnt_secondary_writes; 1552 secondary_accwrites = mp->mnt_secondary_accwrites; 1553 MNT_IUNLOCK(mp); 1554 1555 /* Grab snapshot of softdep dependency counts */ 1556 softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps); 1557 1558 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1559 /* 1560 * Depend on the vnode interlock to keep things stable enough 1561 * for a quick test. Since there might be hundreds of 1562 * thousands of vnodes, we cannot afford even a subroutine 1563 * call unless there's a good chance that we have work to do. 1564 */ 1565 if (vp->v_type == VNON) { 1566 VI_UNLOCK(vp); 1567 continue; 1568 } 1569 ip = VTOI(vp); 1570 if ((ip->i_flag & 1571 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && 1572 vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1573 VI_UNLOCK(vp); 1574 continue; 1575 } 1576 if ((error = vget(vp, lockreq, td)) != 0) { 1577 if (error == ENOENT || error == ENOLCK) { 1578 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1579 goto loop; 1580 } 1581 continue; 1582 } 1583 if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0) 1584 allerror = error; 1585 vput(vp); 1586 } 1587 /* 1588 * Force stale filesystem control information to be flushed. 
1589 */ 1590 if (waitfor == MNT_WAIT || rebooting) { 1591 if ((error = softdep_flushworklist(ump->um_mountp, &count, td))) 1592 allerror = error; 1593 /* Flushed work items may create new vnodes to clean */ 1594 if (allerror == 0 && count) 1595 goto loop; 1596 } 1597#ifdef QUOTA 1598 qsync(mp); 1599#endif 1600 1601 devvp = ump->um_devvp; 1602 bo = &devvp->v_bufobj; 1603 BO_LOCK(bo); 1604 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 1605 BO_UNLOCK(bo); 1606 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1607 error = VOP_FSYNC(devvp, waitfor, td); 1608 VOP_UNLOCK(devvp, 0); 1609 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN)) 1610 error = ffs_sbupdate(ump, waitfor, 0); 1611 if (error != 0) 1612 allerror = error; 1613 if (allerror == 0 && waitfor == MNT_WAIT) 1614 goto loop; 1615 } else if (suspend != 0) { 1616 if (softdep_check_suspend(mp, 1617 devvp, 1618 softdep_deps, 1619 softdep_accdeps, 1620 secondary_writes, 1621 secondary_accwrites) != 0) { 1622 MNT_IUNLOCK(mp); 1623 goto loop; /* More work needed */ 1624 } 1625 mtx_assert(MNT_MTX(mp), MA_OWNED); 1626 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED; 1627 MNT_IUNLOCK(mp); 1628 suspended = 1; 1629 } else 1630 BO_UNLOCK(bo); 1631 /* 1632 * Write back modified superblock. 1633 */ 1634 if (fs->fs_fmod != 0 && 1635 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0) 1636 allerror = error; 1637 return (allerror); 1638} 1639 1640int 1641ffs_vget(mp, ino, flags, vpp) 1642 struct mount *mp; 1643 ino_t ino; 1644 int flags; 1645 struct vnode **vpp; 1646{ 1647 return (ffs_vgetf(mp, ino, flags, vpp, 0)); 1648} 1649 1650int 1651ffs_vgetf(mp, ino, flags, vpp, ffs_flags) 1652 struct mount *mp; 1653 ino_t ino; 1654 int flags; 1655 struct vnode **vpp; 1656 int ffs_flags; 1657{ 1658 struct fs *fs; 1659 struct inode *ip; 1660 struct ufsmount *ump; 1661 struct buf *bp; 1662 struct vnode *vp; 1663 struct cdev *dev; 1664 int error; 1665 1666 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL); 1667 if (error || *vpp != NULL) 1668 return (error); 1669 1670 /* 1671 * We must promote to an exclusive lock for vnode creation. This 1672 * can happen if lookup is passed LOCKSHARED. 1673 */ 1674 if ((flags & LK_TYPE_MASK) == LK_SHARED) { 1675 flags &= ~LK_TYPE_MASK; 1676 flags |= LK_EXCLUSIVE; 1677 } 1678 1679 /* 1680 * We do not lock vnode creation as it is believed to be too 1681 * expensive for such rare case as simultaneous creation of vnode 1682 * for same ino by different processes. We just allow them to race 1683 * and check later to decide who wins. Let the race begin! 1684 */ 1685 1686 ump = VFSTOUFS(mp); 1687 dev = ump->um_dev; 1688 fs = ump->um_fs; 1689 ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO); 1690 1691 /* Allocate a new vnode/inode. */ 1692 error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ? 1693 &ffs_vnodeops1 : &ffs_vnodeops2, &vp); 1694 if (error) { 1695 *vpp = NULL; 1696 uma_zfree(uma_inode, ip); 1697 return (error); 1698 } 1699 /* 1700 * FFS supports recursive locking. 
1701 */ 1702 lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL); 1703 VN_LOCK_AREC(vp); 1704 vp->v_data = ip; 1705 vp->v_bufobj.bo_bsize = fs->fs_bsize; 1706 ip->i_vnode = vp; 1707 ip->i_ump = ump; 1708 ip->i_fs = fs; 1709 ip->i_dev = dev; 1710 ip->i_number = ino; 1711 ip->i_ea_refs = 0; 1712 ip->i_nextclustercg = -1; 1713#ifdef QUOTA 1714 { 1715 int i; 1716 for (i = 0; i < MAXQUOTAS; i++) 1717 ip->i_dquot[i] = NODQUOT; 1718 } 1719#endif 1720 1721 if (ffs_flags & FFSV_FORCEINSMQ) 1722 vp->v_vflag |= VV_FORCEINSMQ; 1723 error = insmntque(vp, mp); 1724 if (error != 0) { 1725 uma_zfree(uma_inode, ip); 1726 *vpp = NULL; 1727 return (error); 1728 } 1729 vp->v_vflag &= ~VV_FORCEINSMQ; 1730 error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL); 1731 if (error || *vpp != NULL) 1732 return (error); 1733 1734 /* Read in the disk contents for the inode, copy into the inode. */ 1735 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)), 1736 (int)fs->fs_bsize, NOCRED, &bp); 1737 if (error) { 1738 /* 1739 * The inode does not contain anything useful, so it would 1740 * be misleading to leave it on its hash chain. With mode 1741 * still zero, it will be unlinked and returned to the free 1742 * list by vput(). 1743 */ 1744 brelse(bp); 1745 vput(vp); 1746 *vpp = NULL; 1747 return (error); 1748 } 1749 if (ip->i_ump->um_fstype == UFS1) 1750 ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK); 1751 else 1752 ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK); 1753 ffs_load_inode(bp, ip, fs, ino); 1754 if (DOINGSOFTDEP(vp)) 1755 softdep_load_inodeblock(ip); 1756 else 1757 ip->i_effnlink = ip->i_nlink; 1758 bqrelse(bp); 1759 1760 /* 1761 * Initialize the vnode from the inode, check for aliases. 1762 * Note that the underlying vnode may have changed. 1763 */ 1764 if (ip->i_ump->um_fstype == UFS1) 1765 error = ufs_vinit(mp, &ffs_fifoops1, &vp); 1766 else 1767 error = ufs_vinit(mp, &ffs_fifoops2, &vp); 1768 if (error) { 1769 vput(vp); 1770 *vpp = NULL; 1771 return (error); 1772 } 1773 1774 /* 1775 * Finish inode initialization. 1776 */ 1777 if (vp->v_type != VFIFO) { 1778 /* FFS supports shared locking for all files except fifos. */ 1779 VN_LOCK_ASHARE(vp); 1780 } 1781 1782 /* 1783 * Set up a generation number for this inode if it does not 1784 * already have one. This should only happen on old filesystems. 1785 */ 1786 if (ip->i_gen == 0) { 1787 ip->i_gen = arc4random() / 2 + 1; 1788 if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { 1789 ip->i_flag |= IN_MODIFIED; 1790 DIP_SET(ip, i_gen, ip->i_gen); 1791 } 1792 } 1793#ifdef MAC 1794 if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) { 1795 /* 1796 * If this vnode is already allocated, and we're running 1797 * multi-label, attempt to perform a label association 1798 * from the extended attributes on the inode. 1799 */ 1800 error = mac_vnode_associate_extattr(mp, vp); 1801 if (error) { 1802 /* ufs_inactive will release ip->i_devvp ref. */ 1803 vput(vp); 1804 *vpp = NULL; 1805 return (error); 1806 } 1807 } 1808#endif 1809 1810 *vpp = vp; 1811 return (0); 1812} 1813 1814/* 1815 * File handle to vnode 1816 * 1817 * Have to be really careful about stale file handles: 1818 * - check that the inode number is valid 1819 * - call ffs_vget() to get the locked inode 1820 * - check for an unallocated inode (i_mode == 0) 1821 * - check that the given client host has export rights and return 1822 * those rights via. 
exflagsp and credanonp 1823 */ 1824static int 1825ffs_fhtovp(mp, fhp, flags, vpp) 1826 struct mount *mp; 1827 struct fid *fhp; 1828 int flags; 1829 struct vnode **vpp; 1830{ 1831 struct ufid *ufhp; 1832 struct fs *fs; 1833 1834 ufhp = (struct ufid *)fhp; 1835 fs = VFSTOUFS(mp)->um_fs; 1836 if (ufhp->ufid_ino < ROOTINO || 1837 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg) 1838 return (ESTALE); 1839 return (ufs_fhtovp(mp, ufhp, flags, vpp)); 1840} 1841 1842/* 1843 * Initialize the filesystem. 1844 */ 1845static int 1846ffs_init(vfsp) 1847 struct vfsconf *vfsp; 1848{ 1849 1850 ffs_susp_initialize(); 1851 softdep_initialize(); 1852 return (ufs_init(vfsp)); 1853} 1854 1855/* 1856 * Undo the work of ffs_init(). 1857 */ 1858static int 1859ffs_uninit(vfsp) 1860 struct vfsconf *vfsp; 1861{ 1862 int ret; 1863 1864 ret = ufs_uninit(vfsp); 1865 softdep_uninitialize(); 1866 ffs_susp_uninitialize(); 1867 return (ret); 1868} 1869 1870/* 1871 * Write a superblock and associated information back to disk. 1872 */ 1873int 1874ffs_sbupdate(ump, waitfor, suspended) 1875 struct ufsmount *ump; 1876 int waitfor; 1877 int suspended; 1878{ 1879 struct fs *fs = ump->um_fs; 1880 struct buf *sbbp; 1881 struct buf *bp; 1882 int blks; 1883 void *space; 1884 int i, size, error, allerror = 0; 1885 1886 if (fs->fs_ronly == 1 && 1887 (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) != 1888 (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0) 1889 panic("ffs_sbupdate: write read-only filesystem"); 1890 /* 1891 * We use the superblock's buf to serialize calls to ffs_sbupdate(). 1892 */ 1893 sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 1894 (int)fs->fs_sbsize, 0, 0, 0); 1895 /* 1896 * First write back the summary information. 1897 */ 1898 blks = howmany(fs->fs_cssize, fs->fs_fsize); 1899 space = fs->fs_csp; 1900 for (i = 0; i < blks; i += fs->fs_frag) { 1901 size = fs->fs_bsize; 1902 if (i + fs->fs_frag > blks) 1903 size = (blks - i) * fs->fs_fsize; 1904 bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i), 1905 size, 0, 0, 0); 1906 bcopy(space, bp->b_data, (u_int)size); 1907 space = (char *)space + size; 1908 if (suspended) 1909 bp->b_flags |= B_VALIDSUSPWRT; 1910 if (waitfor != MNT_WAIT) 1911 bawrite(bp); 1912 else if ((error = bwrite(bp)) != 0) 1913 allerror = error; 1914 } 1915 /* 1916 * Now write back the superblock itself. If any errors occurred 1917 * up to this point, then fail so that the superblock avoids 1918 * being written out as clean. 
1919 */ 1920 if (allerror) { 1921 brelse(sbbp); 1922 return (allerror); 1923 } 1924 bp = sbbp; 1925 if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 && 1926 (fs->fs_flags & FS_FLAGS_UPDATED) == 0) { 1927 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1928 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1); 1929 fs->fs_sblockloc = SBLOCK_UFS1; 1930 } 1931 if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 && 1932 (fs->fs_flags & FS_FLAGS_UPDATED) == 0) { 1933 printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n", 1934 fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2); 1935 fs->fs_sblockloc = SBLOCK_UFS2; 1936 } 1937 fs->fs_fmod = 0; 1938 fs->fs_time = time_second; 1939 if (MOUNTEDSOFTDEP(ump->um_mountp)) 1940 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp); 1941 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 1942 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 1943 if (suspended) 1944 bp->b_flags |= B_VALIDSUSPWRT; 1945 if (waitfor != MNT_WAIT) 1946 bawrite(bp); 1947 else if ((error = bwrite(bp)) != 0) 1948 allerror = error; 1949 return (allerror); 1950} 1951 1952static int 1953ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp, 1954 int attrnamespace, const char *attrname) 1955{ 1956 1957#ifdef UFS_EXTATTR 1958 return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace, 1959 attrname)); 1960#else 1961 return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, 1962 attrname)); 1963#endif 1964} 1965 1966static void 1967ffs_ifree(struct ufsmount *ump, struct inode *ip) 1968{ 1969 1970 if (ump->um_fstype == UFS1 && ip->i_din1 != NULL) 1971 uma_zfree(uma_ufs1, ip->i_din1); 1972 else if (ip->i_din2 != NULL) 1973 uma_zfree(uma_ufs2, ip->i_din2); 1974 uma_zfree(uma_inode, ip); 1975} 1976 1977static int dobkgrdwrite = 1; 1978SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0, 1979 "Do background writes (honoring the BV_BKGRDWRITE flag)?"); 1980 1981/* 1982 * Complete a background write started from bwrite. 1983 */ 1984static void 1985ffs_backgroundwritedone(struct buf *bp) 1986{ 1987 struct bufobj *bufobj; 1988 struct buf *origbp; 1989 1990 /* 1991 * Find the original buffer that we are writing. 1992 */ 1993 bufobj = bp->b_bufobj; 1994 BO_LOCK(bufobj); 1995 if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL) 1996 panic("backgroundwritedone: lost buffer"); 1997 1998 /* 1999 * We should mark the cylinder group buffer origbp as 2000 * dirty, to not loose the failed write. 2001 */ 2002 if ((bp->b_ioflags & BIO_ERROR) != 0) 2003 origbp->b_vflags |= BV_BKGRDERR; 2004 BO_UNLOCK(bufobj); 2005 /* 2006 * Process dependencies then return any unfinished ones. 2007 */ 2008 pbrelvp(bp); 2009 if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0) 2010 buf_complete(bp); 2011#ifdef SOFTUPDATES 2012 if (!LIST_EMPTY(&bp->b_dep)) 2013 softdep_move_dependencies(bp, origbp); 2014#endif 2015 /* 2016 * This buffer is marked B_NOCACHE so when it is released 2017 * by biodone it will be tossed. 2018 */ 2019 bp->b_flags |= B_NOCACHE; 2020 bp->b_flags &= ~B_CACHE; 2021 2022 /* 2023 * Prevent brelse() from trying to keep and re-dirtying bp on 2024 * errors. It causes b_bufobj dereference in 2025 * bdirty()/reassignbuf(), and b_bufobj was cleared in 2026 * pbrelvp() above. 

/*
 * Dispatch extended attribute control operations to the UFS-native
 * extattr code when it is compiled in; otherwise fall back to the
 * default VFS handler.
 */
static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
	int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

/*
 * Release an inode and its UFS1 or UFS2 dinode back to their UMA zones.
 */
static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * We should mark the cylinder group buffer origbp as
	 * dirty, to not lose the failed write.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	pbrelvp(bp);
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~B_CACHE;

	/*
	 * Prevent brelse() from trying to keep and re-dirtying bp on
	 * errors. It causes b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}
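
/*
 * Overview of the background ("copy") write mechanism implemented by
 * ffs_bufwrite() below and completed by ffs_backgroundwritedone()
 * above, summarized here for orientation:
 *
 * 1. ffs_bufwrite() clones a cylinder group buffer marked
 *    BX_BKGRDWRITE into a new buffer, sets BV_BKGRDINPROG on the
 *    original, and starts an asynchronous write of the copy.
 * 2. The original buffer stays available for further updates while the
 *    copy is in flight; a synchronous writer that finds BV_BKGRDINPROG
 *    set marks BV_BKGRDWAIT and sleeps on b_xflags.
 * 3. ffs_backgroundwritedone() finds the original via gbincore(),
 *    moves any unfinished soft updates dependencies back to it,
 *    records a failed write with BV_BKGRDERR, clears BV_BKGRDINPROG,
 *    and wakes any waiter.
 */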

/*
 * Write, release buffer on completion. (Done by iodone
 * if async). Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that the buffer is
 * fully valid and thus cacheable. This is true even of NFS
 * now so we set it generally. This could be set either here
 * or in biodone() since the I/O is synchronous. We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory. If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT((bp->b_flags & B_UNMAPPED) == 0, ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |= BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies. If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original. The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes.
		 */
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	return (bufwrite(bp));
}

/*
 * Buffer strategy routine for the GEOM-backed device vnode: run writes
 * through the suspension sanity check, the snapshot copy-on-write code,
 * and the soft updates start hooks before handing the buffer to GEOM.
 */
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	int error;
	struct buf *tbp;
	int nocopy;

	vp = bo->__bo_vnode;
	if (bp->b_iocmd == BIO_WRITE) {
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
	}
	g_vfs_strategy(bo, bp);
}

/*
 * Return non-zero if the given mount point is served by this
 * filesystem, i.e. its vfsops are ufs_vfsops.
 */
int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */
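
/*
 * Example DDB usage of the command defined above.  The address shown is
 * a made-up illustration, not output from a real system:
 *
 *	db> show ffs				print every mounted UFS filesystem
 *	db> show ffs 0xfffff800035e9660		print the ufsmount behind that struct mount
 */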