/*	$NetBSD$	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_vnops.c	8.13 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_uvm_page_trkown.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_bswap.h>
#include <ufs/ufs/ufs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern pid_t lfs_writer_daemon;
int lfs_ignore_lazy_sync = 1;

/* Global vfs data structures for lfs. */
int (**lfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, ufs_lookup },		/* lookup */
	{ &vop_create_desc, lfs_create },		/* create */
	{ &vop_whiteout_desc, ufs_whiteout },		/* whiteout */
	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
	{ &vop_open_desc, ufs_open },			/* open */
	{ &vop_close_desc, lfs_close },			/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, lfs_read },			/* read */
	{ &vop_write_desc, lfs_write },			/* write */
	{ &vop_ioctl_desc, ufs_ioctl },			/* ioctl */
	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
	{ &vop_poll_desc, ufs_poll },			/* poll */
	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, ufs_revoke },		/* revoke */
	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
	{ &vop_seek_desc, ufs_seek },			/* seek */
	{ &vop_remove_desc, lfs_remove },		/* remove */
	{ &vop_link_desc, lfs_link },			/* link */
	{ &vop_rename_desc, lfs_rename },		/* rename */
	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
	{ &vop_readdir_desc, ufs_readdir },		/* readdir */
	{ &vop_readlink_desc, ufs_readlink },		/* readlink */
	{ &vop_abortop_desc, ufs_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, ufs_bmap },			/* bmap */
	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, ufs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, ufs_advlock },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_vnodeop_opv_desc =
	{ &lfs_vnodeop_p, lfs_vnodeop_entries };
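/*
 * A quick orientation, in case the table above looks opaque: each
 * vnodeopv_desc pairs an operations-vector pointer with its entry
 * list, and at filesystem attach time the VFS walks the entries and
 * fills in the vector (lfs_vnodeop_p and friends), so that, roughly,
 *
 *	VOP_CREATE(dvp, vpp, cnp, vap)
 *
 * on an LFS vnode dispatches through lfs_vnodeop_p to lfs_create()
 * below.  (Sketch only; the tables themselves are registered via the
 * vfsops in lfs_vfsops.c.)
 */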
int (**lfs_specop_p)(void *);
const struct vnodeopv_entry_desc lfs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, lfsspec_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsspec_read },		/* read */
	{ &vop_write_desc, ufsspec_write },		/* write */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_specop_opv_desc =
	{ &lfs_specop_p, lfs_specop_entries };

int (**lfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },		/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, lfsfifo_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsfifo_read },		/* read */
	{ &vop_write_desc, ufsfifo_write },		/* write */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
	{ &vop_fsync_desc, vn_fifo_bypass },		/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
	{ &vop_link_desc, vn_fifo_bypass },		/* link */
	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
	{ &vop_strategy_desc, vn_fifo_bypass },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_fifoop_opv_desc =
	{ &lfs_fifoop_p, lfs_fifoop_entries };
static int check_dirty(struct lfs *, struct vnode *, off_t, off_t, off_t,
    int, int, struct vm_page **);

#define	LFS_READWRITE
#include <ufs/ufs/ufs_readwrite.c>
#undef	LFS_READWRITE
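/*
 * Note on the #include above: ufs_readwrite.c is a source template,
 * not a header.  Compiled with LFS_READWRITE defined it expands into
 * the lfs_read() and lfs_write() routines named in the op tables
 * above (without the define, the same file generates the FFS
 * read/write paths), which is why no lfs_read()/lfs_write()
 * definitions appear in this file.
 */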
/*
 * Synch an open file.
 */
/* ARGSUSED */
int
lfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error, wait;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	/* If we're mounted read-only, don't try to sync. */
	if (fs->lfs_ronly)
		return 0;

	/* If a removed vnode is being cleaned, no need to sync here. */
	if ((ap->a_flags & FSYNC_RECLAIM) != 0 && ip->i_mode == 0)
		return 0;

	/*
	 * Trickle sync simply adds this vnode to the pager list, as if
	 * the pagedaemon had requested a pageout.
	 */
	if (ap->a_flags & FSYNC_LAZY) {
		if (lfs_ignore_lazy_sync == 0) {
			mutex_enter(&lfs_lock);
			if (!(ip->i_flags & IN_PAGING)) {
				ip->i_flags |= IN_PAGING;
				TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip,
				    i_lfs_pchain);
			}
			wakeup(&lfs_writer_daemon);
			mutex_exit(&lfs_lock);
		}
		return 0;
	}

	/*
	 * If a vnode is being cleaned, flush it out before we try to
	 * reuse it.  This prevents the cleaner from writing files twice
	 * in the same partial segment, causing an accounting underflow.
	 */
	if (ap->a_flags & FSYNC_RECLAIM && ip->i_flags & IN_CLEANING) {
		lfs_vflush(vp);
	}

	wait = (ap->a_flags & FSYNC_WAIT);
	do {
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
		    round_page(ap->a_offhi),
		    PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
		if (error == EAGAIN) {
			mutex_enter(&lfs_lock);
			mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_fsync",
			    hz / 100 + 1, &lfs_lock);
			mutex_exit(&lfs_lock);
		}
	} while (error == EAGAIN);
	if (error)
		return error;

	if ((ap->a_flags & FSYNC_DATAONLY) == 0)
		error = lfs_update(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		error = VOP_IOCTL(ip->i_devvp, DIOCCACHESYNC, &l, FWRITE,
		    curlwp->l_cred);
	}
	if (wait && !VPISEMPTY(vp))
		LFS_SET_UINO(ip, IN_MODIFIED);

	return error;
}

/*
 * Take IN_ADIROP off, then call ufs_inactive.
 */
int
lfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	lfs_unmark_vnode(ap->a_vp);

	/*
	 * The Ifile is only ever inactivated on unmount.
	 * Streamline this process by not giving it more dirty blocks.
	 */
	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
		mutex_exit(&lfs_lock);
		VOP_UNLOCK(ap->a_vp);
		return 0;
	}

#ifdef DEBUG
	/*
	 * This might happen on unmount.
	 * XXX If it happens at any other time, it should be a panic.
	 */
	if (ap->a_vp->v_uflag & VU_DIROP) {
		struct inode *ip = VTOI(ap->a_vp);
		printf("lfs_inactive: inactivating VU_DIROP? ino = %d\n",
		    (int)ip->i_number);
	}
#endif /* DEBUG */

	return ufs_inactive(v);
}
/*
 * These macros are used to bracket UFS directory ops, so that we can
 * identify all the pages touched during directory ops which need to
 * be ordered and flushed atomically, so that they may be recovered.
 *
 * Because we have to mark nodes VU_DIROP in order to prevent
 * the cache from reclaiming them while a dirop is in progress, we must
 * also manage the number of nodes so marked (otherwise we can run out).
 * We do this by setting lfs_dirvcount to the number of marked vnodes; it
 * is decremented during segment write, when VU_DIROP is taken off.
 */
#define	MARK_VNODE(vp)			lfs_mark_vnode(vp)
#define	UNMARK_VNODE(vp)		lfs_unmark_vnode(vp)
#define	SET_DIROP_CREATE(dvp, vpp)	lfs_set_dirop_create((dvp), (vpp))
#define	SET_DIROP_REMOVE(dvp, vp)	lfs_set_dirop((dvp), (vp))
static int lfs_set_dirop_create(struct vnode *, struct vnode **);
static int lfs_set_dirop(struct vnode *, struct vnode *);

static int
lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
{
	struct lfs *fs;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp == NULL || VOP_ISLOCKED(vp));

	fs = VTOI(dvp)->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * LFS_NRESERVE calculates direct and indirect blocks as well
	 * as an inode block; an overestimate in most cases.
	 */
	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
		return (error);

    restart:
	mutex_enter(&lfs_lock);
	if (fs->lfs_dirops == 0) {
		mutex_exit(&lfs_lock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
		mutex_enter(&lfs_lock);
	}
	while (fs->lfs_writer) {
		error = mtsleep(&fs->lfs_dirops, (PRIBIO + 1) | PCATCH,
		    "lfs_sdirop", 0, &lfs_lock);
		if (error == EINTR) {
			mutex_exit(&lfs_lock);
			goto unreserve;
		}
	}
	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
		wakeup(&lfs_writer_daemon);
		mutex_exit(&lfs_lock);
		preempt();
		goto restart;
	}

	if (lfs_dirvcount > LFS_MAX_DIROP) {
		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
		    "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
		if ((error = mtsleep(&lfs_dirvcount,
		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
		    &lfs_lock)) != 0) {
			goto unreserve;
		}
		goto restart;
	}

	++fs->lfs_dirops;
	/* fs->lfs_doifile = 1; */ /* XXX why? --ks */
	mutex_exit(&lfs_lock);

	/* Hold a reference so SET_ENDOP will be happy */
	vref(dvp);
	if (vp) {
		vref(vp);
		MARK_VNODE(vp);
	}

	MARK_VNODE(dvp);
	return 0;

    unreserve:
	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
	return error;
}

/*
 * Get a new vnode *before* adjusting the dirop count, to avoid a deadlock
 * in getnewvnode(), if we have a stacked filesystem mounted on top
 * of us.
 *
 * NB: this means we have to clear the new vnodes on error.  Fortunately
 * SET_ENDOP is there to do that for us.
 */
static int
lfs_set_dirop_create(struct vnode *dvp, struct vnode **vpp)
{
	int error;
	struct lfs *fs;

	fs = VFSTOUFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly)
		return EROFS;
	if (vpp == NULL) {
		return lfs_set_dirop(dvp, NULL);
	}
	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_set_dirop_create: dvp %p error %d\n",
		    dvp, error));
		return error;
	}
	if ((error = lfs_set_dirop(dvp, NULL)) != 0) {
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}
	return 0;
}

#define	SET_ENDOP_BASE(fs, dvp, str)					\
	do {								\
		mutex_enter(&lfs_lock);					\
		--(fs)->lfs_dirops;					\
		if (!(fs)->lfs_dirops) {				\
			if ((fs)->lfs_nadirop) {			\
				panic("SET_ENDOP: %s: no dirops but "	\
				      " nadirop=%d", (str),		\
				      (fs)->lfs_nadirop);		\
			}						\
			wakeup(&(fs)->lfs_writer);			\
			mutex_exit(&lfs_lock);				\
			lfs_check((dvp), LFS_UNUSED_LBN, 0);		\
		} else							\
			mutex_exit(&lfs_lock);				\
	} while(0)
#define	SET_ENDOP_CREATE(fs, dvp, nvpp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (nvpp && *nvpp)					\
			UNMARK_VNODE(*nvpp);				\
		/* Check for error return to stem vnode leakage */	\
		if (nvpp && *nvpp && !((*nvpp)->v_uflag & VU_DIROP))	\
			ungetnewvnode(*(nvpp));				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), NULL, -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
	} while(0)
#define	SET_ENDOP_CREATE_AP(ap, str)					\
	SET_ENDOP_CREATE(VTOI((ap)->a_dvp)->i_lfs, (ap)->a_dvp,		\
			 (ap)->a_vpp, (str))
#define	SET_ENDOP_REMOVE(fs, dvp, ovp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (ovp)						\
			UNMARK_VNODE(ovp);				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), (ovp), -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
		if (ovp)						\
			vrele(ovp);					\
	} while(0)
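/*
 * The dirop bracket pattern, as the directory operations below
 * illustrate (sketch only; "ufs_whatever" stands for the underlying
 * UFS op):
 *
 *	if ((error = SET_DIROP_CREATE(dvp, vpp)) != 0) {
 *		vput(dvp);
 *		return error;
 *	}
 *	error = ufs_whatever(ap);
 *	SET_ENDOP_CREATE_AP(ap, "whatever");
 *
 * Everything dirtied between the SET_DIROP and SET_ENDOP calls is
 * written out as a unit by the segment writer, so the operation can
 * be recovered atomically after a crash.
 */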
void
lfs_mark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	mutex_enter(&lfs_lock);
	if (!(ip->i_flag & IN_ADIROP)) {
		if (!(vp->v_uflag & VU_DIROP)) {
			mutex_exit(&lfs_lock);
			mutex_enter(vp->v_interlock);
			if (lfs_vref(vp) != 0)
				panic("lfs_mark_vnode: could not vref");
			mutex_enter(&lfs_lock);
			++lfs_dirvcount;
			++fs->lfs_dirvcount;
			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			vp->v_uflag |= VU_DIROP;
		}
		++fs->lfs_nadirop;
		ip->i_flag &= ~IN_CDIROP;
		ip->i_flag |= IN_ADIROP;
	} else
		KASSERT(vp->v_uflag & VU_DIROP);
	mutex_exit(&lfs_lock);
}

void
lfs_unmark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	mutex_enter(&lfs_lock);
	if (ip && (ip->i_flag & IN_ADIROP)) {
		KASSERT(vp->v_uflag & VU_DIROP);
		--ip->i_lfs->lfs_nadirop;
		ip->i_flag &= ~IN_ADIROP;
	}
	mutex_exit(&lfs_lock);
}

int
lfs_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_symlink(ap);
	SET_ENDOP_CREATE_AP(ap, "symlink");
	return (error);
}

int
lfs_mknod(void *v)
{
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	int error;
	struct mount *mp;
	ino_t ino;
	struct ufs_lookup_results *ulr;

	/* XXX should handle this material another way */
	ulr = &VTOI(ap->a_dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(ap->a_dvp));

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, ulr, vpp, ap->a_cnp);

	/* Either way we're done with the dirop at this point */
	SET_ENDOP_CREATE_AP(ap, "mknod");

	if (error)
		return (error);

	ip = VTOI(*vpp);
	mp = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
#if 0
		ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
		    UFS_MPNEEDSWAP((*vpp)->v_mount));
#else
		ip->i_ffs1_rdev = vap->va_rdev;
#endif
	}

	/*
	 * Call fsync to write the vnode so that we don't have to deal with
	 * flushing it when it's marked VU_DIROP|VI_XLOCK.
	 *
	 * XXX KS - If we can't flush we also can't call vgone(), so must
	 * return.  But, that leaves this vnode in limbo, also not good.
	 * Can this ever happen (barring hardware failure)?
	 */
	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0)) != 0) {
		panic("lfs_mknod: couldn't fsync (ino %llu)",
		    (unsigned long long)ino);
		/* return (error); */
	}
	/*
	 * Remove vnode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */
	VOP_UNLOCK(*vpp);
	(*vpp)->v_type = VNON;
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);

	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	return (0);
}

int
lfs_create(void *v)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_create(ap);
	SET_ENDOP_CREATE_AP(ap, "create");
	return (error);
}

int
lfs_mkdir(void *v)
{
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_mkdir(ap);
	SET_ENDOP_CREATE_AP(ap, "mkdir");
	return (error);
}

int
lfs_remove(void *v)
{
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *dvp, *vp;
	struct inode *ip;
	int error;

	dvp = ap->a_dvp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	if ((error = SET_DIROP_REMOVE(dvp, vp)) != 0) {
		if (dvp == vp)
			vrele(vp);
		else
			vput(vp);
		vput(dvp);
		return error;
	}
	error = ufs_remove(ap);
	if (ip->i_nlink == 0)
		lfs_orphan(ip->i_lfs, ip->i_number);
	SET_ENDOP_REMOVE(ip->i_lfs, dvp, ap->a_vp, "remove");
	return (error);
}

int
lfs_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	int error;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if ((error = SET_DIROP_REMOVE(ap->a_dvp, ap->a_vp)) != 0) {
		if (ap->a_dvp == vp)
			vrele(ap->a_dvp);
		else
			vput(ap->a_dvp);
		vput(vp);
		return error;
	}
	error = ufs_rmdir(ap);
	if (ip->i_nlink == 0)
		lfs_orphan(ip->i_lfs, ip->i_number);
	SET_ENDOP_REMOVE(ip->i_lfs, ap->a_dvp, ap->a_vp, "rmdir");
	return (error);
}

int
lfs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct vnode **vpp = NULL;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_link(ap);
	SET_ENDOP_CREATE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vpp, "link");
	return (error);
}

/* XXX following lifted from ufs_lookup.c */
#define	FSFMT(vp)	(((vp)->v_mount->mnt_iflag & IMNT_DTYPE) == 0)

/*
 * Check if either entry referred to by FROM_ULR is within the range
 * of entries named by TO_ULR.
 */
static int
ulr_overlap(const struct ufs_lookup_results *from_ulr,
    const struct ufs_lookup_results *to_ulr)
{
	doff_t from_start, from_prevstart;
	doff_t to_start, to_end;

	/*
	 * FROM is a DELETE result; offset points to the entry to
	 * remove and subtracting count gives the previous entry.
	 */
	from_start = from_ulr->ulr_offset - from_ulr->ulr_count;
	from_prevstart = from_ulr->ulr_offset;

	/*
	 * TO is a RENAME (thus non-DELETE) result; offset points
	 * to the beginning of a region to write in, and adding
	 * count gives the end of the region.
	 */
	to_start = to_ulr->ulr_offset;
	to_end = to_ulr->ulr_offset + to_ulr->ulr_count;

	if (from_prevstart >= to_start && from_prevstart < to_end) {
		return 1;
	}
	if (from_start >= to_start && from_start < to_end) {
		return 1;
	}
	return 0;
}
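/*
 * Worked example with made-up offsets: if the TO result describes a
 * pending write region starting at ulr_offset 64 with ulr_count 512,
 * the region is [64, 576).  A FROM result with ulr_offset 96 and
 * ulr_count 32 yields the points 96 and 96 - 32 = 64, both inside
 * [64, 576), so ulr_overlap() returns 1 and the caller must re-locate
 * the FROM entry after any compaction.
 */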
/*
 * A virgin directory (no blushing please).
 */
static const struct dirtemplate mastertemplate = {
	0, 12, DT_DIR, 1, ".",
	0, DIRBLKSIZ - 12, DT_DIR, 2, ".."
};

/*
 * Wrapper for relookup that also updates the supplemental results.
 */
static int
do_relookup(struct vnode *dvp, struct ufs_lookup_results *ulr,
    struct vnode **vp, struct componentname *cnp)
{
	int error;

	error = relookup(dvp, vp, cnp, 0);
	if (error) {
		return error;
	}
	/* update the supplemental results */
	*ulr = VTOI(dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(dvp));
	return 0;
}

/*
 * Lock and relookup a sequence of two directories and two children.
 */
static int
lock_vnode_sequence(struct vnode *d1, struct ufs_lookup_results *ulr1,
    struct vnode **v1_ret, struct componentname *cn1,
    int v1_missing_ok,
    int overlap_error,
    struct vnode *d2, struct ufs_lookup_results *ulr2,
    struct vnode **v2_ret, struct componentname *cn2,
    int v2_missing_ok)
{
	struct vnode *v1, *v2;
	int error;

	KASSERT(d1 != d2);

	vn_lock(d1, LK_EXCLUSIVE | LK_RETRY);
	if (VTOI(d1)->i_size == 0) {
		/* d1 has been rmdir'd */
		VOP_UNLOCK(d1);
		return ENOENT;
	}
	error = do_relookup(d1, ulr1, &v1, cn1);
	if (v1_missing_ok) {
		if (error == ENOENT) {
			/*
			 * Note: currently if the name doesn't exist,
			 * relookup succeeds (it intercepts the
			 * EJUSTRETURN from VOP_LOOKUP) and sets tvp
			 * to NULL.  Therefore, we will never get
			 * ENOENT and this branch is not needed.
			 * However, in a saner future the EJUSTRETURN
			 * garbage will go away, so let's DTRT.
			 */
			v1 = NULL;
			error = 0;
		}
	} else {
		if (error == 0 && v1 == NULL) {
			/* This is what relookup sets if v1 disappeared. */
			error = ENOENT;
		}
	}
	if (error) {
		VOP_UNLOCK(d1);
		return error;
	}
	if (v1 && v1 == d2) {
		VOP_UNLOCK(d1);
		VOP_UNLOCK(v1);
		vrele(v1);
		return overlap_error;
	}

	/*
	 * The right way to do this is to do lookups without locking
	 * the results, and lock the results afterwards; then at the
	 * end we can avoid trying to lock v2 if v2 == v1.
	 *
	 * However, for the reasons described in the fdvp == tdvp case
	 * in rename below, we can't do that safely.  So, in the case
	 * where v1 is not a directory, unlock it and lock it again
	 * afterwards.  This is safe in locking order because a
	 * non-directory can't be above anything else in the tree.  If
	 * v1 *is* a directory, that's not true, but then because d1
	 * != d2, v1 != v2.
	 */
	if (v1 && v1->v_type != VDIR) {
		VOP_UNLOCK(v1);
	}
	vn_lock(d2, LK_EXCLUSIVE | LK_RETRY);
	if (VTOI(d2)->i_size == 0) {
		/* d2 has been rmdir'd */
		VOP_UNLOCK(d2);
		if (v1 && v1->v_type == VDIR) {
			VOP_UNLOCK(v1);
		}
		VOP_UNLOCK(d1);
		if (v1) {
			vrele(v1);
		}
		return ENOENT;
	}
	error = do_relookup(d2, ulr2, &v2, cn2);
	if (v2_missing_ok) {
		if (error == ENOENT) {
			/* as above */
			v2 = NULL;
			error = 0;
		}
	} else {
		if (error == 0 && v2 == NULL) {
			/* This is what relookup sets if v2 disappeared. */
			error = ENOENT;
		}
	}
	if (error) {
		VOP_UNLOCK(d2);
		if (v1 && v1->v_type == VDIR) {
			VOP_UNLOCK(v1);
		}
		VOP_UNLOCK(d1);
		if (v1) {
			vrele(v1);
		}
		return error;
	}
	if (v1 && v1->v_type != VDIR && v1 != v2) {
		vn_lock(v1, LK_EXCLUSIVE | LK_RETRY);
	}
	*v1_ret = v1;
	*v2_ret = v2;
	return 0;
}
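/*
 * lfs_rename() below calls this in one of two argument orders,
 * depending on which directory is the ancestor: fdvp/fvp first when
 * fdvp lies on the path from tdvp to the root, tdvp/tvp first
 * otherwise.  Either way the locks are taken in tree order, which is
 * what keeps the sequence deadlock-free.
 */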
int
lfs_rename(void *v)
{
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *tvp, *tdvp, *fvp, *fdvp;
	struct componentname *tcnp, *fcnp;
	struct inode *ip, *txp, *fxp, *tdp, *fdp;
	struct mount *mp;
	struct direct *newdir;
	int doingdirectory, error, marked;
	ino_t oldparent, newparent;

	struct ufs_lookup_results from_ulr, to_ulr;
	struct lfs *fs = VTOI(ap->a_fvp)->i_lfs;

	tvp = ap->a_tvp;
	tdvp = ap->a_tdvp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	tcnp = ap->a_tcnp;
	fcnp = ap->a_fcnp;
	doingdirectory = error = 0;
	oldparent = newparent = 0;
	marked = 0;

	/* save the supplemental lookup results as they currently exist */
	from_ulr = VTOI(fdvp)->i_crap;
	to_ulr = VTOI(tdvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(fdvp));
	UFS_CHECK_CRAPCOUNTER(VTOI(tdvp));

	/*
	 * Owing to VFS oddities we are currently called with tdvp/tvp
	 * locked and not fdvp/fvp.  In a sane world we'd be passed
	 * tdvp and fdvp only, unlocked, and two name strings.  Pretend
	 * we have a sane world and unlock tdvp and tvp.
	 */
	VOP_UNLOCK(tdvp);
	if (tvp && tvp != tdvp) {
		VOP_UNLOCK(tvp);
	}

	/* Also pretend we have a sane world and vrele fvp/tvp. */
	vrele(fvp);
	fvp = NULL;
	if (tvp) {
		vrele(tvp);
		tvp = NULL;
	}

	/*
	 * Check for cross-device rename.
	 */
	if (fdvp->v_mount != tdvp->v_mount) {
		error = EXDEV;
		goto abort;
	}

	/*
	 * Reject "." and ".."
	 */
	if ((fcnp->cn_flags & ISDOTDOT) || (tcnp->cn_flags & ISDOTDOT) ||
	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
	    (tcnp->cn_namelen == 1 && tcnp->cn_nameptr[0] == '.')) {
		error = EINVAL;
		goto abort;
	}

	/*
	 * Get locks.
	 */

	/* paranoia */
	fcnp->cn_flags |= LOCKPARENT|LOCKLEAF;
	tcnp->cn_flags |= LOCKPARENT|LOCKLEAF;

	if (fdvp == tdvp) {
		/* One directory.  Lock it and relookup both children. */
		vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY);

		if (VTOI(fdvp)->i_size == 0) {
			/* directory has been rmdir'd */
			VOP_UNLOCK(fdvp);
			error = ENOENT;
			goto abort;
		}

		error = do_relookup(fdvp, &from_ulr, &fvp, fcnp);
		if (error == 0 && fvp == NULL) {
			/* relookup may produce this if fvp disappears */
			error = ENOENT;
		}
		if (error) {
			VOP_UNLOCK(fdvp);
			goto abort;
		}

		/*
		 * The right way to do this is to look up both children
		 * without locking either, and then lock both unless they
		 * turn out to be the same.  However, due to deep-seated
		 * VFS-level issues all lookups lock the child regardless
		 * of whether LOCKLEAF is set (if LOCKLEAF is not set,
		 * the child is locked during lookup and then unlocked)
		 * so it is not safe to look up tvp while fvp is locked.
		 *
		 * Unlocking fvp here temporarily is more or less safe,
		 * because with the directory locked there's not much
		 * that can happen to it.  However, ideally it wouldn't
		 * be necessary.  XXX.
		 */
		VOP_UNLOCK(fvp);
		/* remember fdvp == tdvp so tdvp is locked */
		error = do_relookup(tdvp, &to_ulr, &tvp, tcnp);
		if (error && error != ENOENT) {
			VOP_UNLOCK(fdvp);
			goto abort;
		}
		if (error == ENOENT) {
			/*
			 * Note: currently if the name doesn't exist,
			 * relookup succeeds (it intercepts the
			 * EJUSTRETURN from VOP_LOOKUP) and sets tvp
			 * to NULL.  Therefore, we will never get
			 * ENOENT and this branch is not needed.
			 * However, in a saner future the EJUSTRETURN
			 * garbage will go away, so let's DTRT.
			 */
			tvp = NULL;
		}

		/* tvp is locked; lock fvp if necessary */
		if (!tvp || tvp != fvp) {
			vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY);
		}
	} else {
		int found_fdvp;
		struct vnode *illegal_fvp;

		/*
		 * The source must not be above the destination.  (If
		 * it were, the rename would detach a section of the
		 * tree.)
		 *
		 * Look up the tree from tdvp to see if we find fdvp,
		 * and if so, return the immediate child of fdvp we're
		 * under; that must not turn out to be the same as
		 * fvp.
		 *
		 * The per-volume rename lock guarantees that the
		 * result of this check remains true until we finish
		 * looking up and locking.
		 */
		error = ufs_parentcheck(fdvp, tdvp, fcnp->cn_cred,
		    &found_fdvp, &illegal_fvp);
		if (error) {
			goto abort;
		}

		/* Must lock in tree order. */
		if (found_fdvp) {
			/* fdvp -> fvp -> tdvp -> tvp */
			error = lock_vnode_sequence(fdvp, &from_ulr,
			    &fvp, fcnp, 0,
			    EINVAL,
			    tdvp, &to_ulr,
			    &tvp, tcnp, 1);
		} else {
			/* tdvp -> tvp -> fdvp -> fvp */
			error = lock_vnode_sequence(tdvp, &to_ulr,
			    &tvp, tcnp, 1,
			    ENOTEMPTY,
			    fdvp, &from_ulr,
			    &fvp, fcnp, 0);
		}
		if (error) {
			if (illegal_fvp) {
				vrele(illegal_fvp);
			}
			goto abort;
		}
		KASSERT(fvp != NULL);

		if (illegal_fvp && fvp == illegal_fvp) {
			vrele(illegal_fvp);
			error = EINVAL;
			goto abort_withlocks;
		}

		if (illegal_fvp) {
			vrele(illegal_fvp);
		}
	}

	KASSERT(fdvp && VOP_ISLOCKED(fdvp));
	KASSERT(fvp && VOP_ISLOCKED(fvp));
	KASSERT(tdvp && VOP_ISLOCKED(tdvp));
	KASSERT(tvp == NULL || VOP_ISLOCKED(tvp));

	/* --- everything is now locked --- */

	if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
	    (VTOI(tdvp)->i_flags & APPEND))) {
		error = EPERM;
		goto abort_withlocks;
	}

	/*
	 * Check if just deleting a link name.
	 */
	if (fvp == tvp) {
		if (fvp->v_type == VDIR) {
			error = EINVAL;
			goto abort_withlocks;
		}

		/* Release destination completely.  Leave fdvp locked. */
		VOP_ABORTOP(tdvp, tcnp);
		if (fdvp != tdvp) {
			VOP_UNLOCK(tdvp);
		}
		VOP_UNLOCK(tvp);
		vrele(tdvp);
		vrele(tvp);

		/* Delete source. */
		/* XXX: do we really need to relookup again? */

		/*
		 * fdvp is still locked, but we just unlocked fvp
		 * (because fvp == tvp) so just decref fvp
		 */
		vrele(fvp);
		fcnp->cn_flags &= ~(MODMASK);
		fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
		fcnp->cn_nameiop = DELETE;
		if ((error = relookup(fdvp, &fvp, fcnp, 0))) {
			vput(fdvp);
			return (error);
		}
		return (VOP_REMOVE(fdvp, fvp, fcnp));
	}

	/* The tiny bit of actual LFS code in this function */
	if ((error = SET_DIROP_REMOVE(tdvp, tvp)) != 0)
		goto abort_withlocks;
	MARK_VNODE(fdvp);
	MARK_VNODE(fvp);
	marked = 1;

	fdp = VTOI(fdvp);
	ip = VTOI(fvp);
	if ((nlink_t) ip->i_nlink >= LINK_MAX) {
		error = EMLINK;
		goto abort_withlocks;
	}
	if ((ip->i_flags & (IMMUTABLE | APPEND)) ||
	    (fdp->i_flags & APPEND)) {
		error = EPERM;
		goto abort_withlocks;
	}
	if ((ip->i_mode & IFMT) == IFDIR) {
		/*
		 * Avoid ".", "..", and aliases of "." for obvious reasons.
		 */
		if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
		    fdp == ip ||
		    (fcnp->cn_flags & ISDOTDOT) ||
		    (tcnp->cn_flags & ISDOTDOT) ||
		    (ip->i_flag & IN_RENAME)) {
			error = EINVAL;
			goto abort_withlocks;
		}
		ip->i_flag |= IN_RENAME;
		doingdirectory = 1;
	}
	oldparent = fdp->i_number;
	VN_KNOTE(fdvp, NOTE_WRITE);	/* XXXLUKEM/XXX: right place? */

	/*
	 * Both the directory and target vnodes are locked.
	 */
	tdp = VTOI(tdvp);
	txp = NULL;
	if (tvp)
		txp = VTOI(tvp);

	mp = fdvp->v_mount;
	fstrans_start(mp, FSTRANS_SHARED);

	if (oldparent != tdp->i_number)
		newparent = tdp->i_number;

	/*
	 * If ".." must be changed (ie the directory gets a new
	 * parent) the user must have write permission in the source
	 * so as to be able to change "..".
	 */
	if (doingdirectory && newparent) {
		error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred);
		if (error)
			goto out;
	}

	KASSERT(fdvp != tvp);

	if (newparent) {
		/* Check for the rename("foo/foo", "foo") case. */
		if (fdvp == tvp) {
			error = doingdirectory ? ENOTEMPTY : EISDIR;
			goto out;
		}
	}

	fxp = VTOI(fvp);
	fdp = VTOI(fdvp);

	error = UFS_WAPBL_BEGIN(fdvp->v_mount);
	if (error)
		goto out2;

	/*
	 * 1) Bump link count while we're moving stuff
	 *    around.  If we crash somewhere before
	 *    completing our work, the link count
	 *    may be wrong, but correctable.
	 */
	ip->i_nlink++;
	DIP_ASSIGN(ip, nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if ((error = UFS_UPDATE(fvp, NULL, NULL, UPDATE_DIROP)) != 0) {
		goto bad;
	}

	/*
	 * 2) If target doesn't exist, link the target
	 *    to the source and unlink the source.
	 *    Otherwise, rewrite the target directory
	 *    entry to reference the source inode and
	 *    expunge the original entry's existence.
	 */
	if (txp == NULL) {
		if (tdp->i_dev != ip->i_dev)
			panic("rename: EXDEV");
		/*
		 * Account for ".." in new directory.
		 * When source and destination have the same
		 * parent we don't fool with the link count.
		 */
		if (doingdirectory && newparent) {
			if ((nlink_t)tdp->i_nlink >= LINK_MAX) {
				error = EMLINK;
				goto bad;
			}
			tdp->i_nlink++;
			DIP_ASSIGN(tdp, nlink, tdp->i_nlink);
			tdp->i_flag |= IN_CHANGE;
			if ((error = UFS_UPDATE(tdvp, NULL, NULL,
			    UPDATE_DIROP)) != 0) {
				tdp->i_nlink--;
				DIP_ASSIGN(tdp, nlink, tdp->i_nlink);
				tdp->i_flag |= IN_CHANGE;
				goto bad;
			}
		}
		newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
		ufs_makedirentry(ip, tcnp, newdir);
		error = ufs_direnter(tdvp, &to_ulr,
		    NULL, newdir, tcnp, NULL);
		pool_cache_put(ufs_direct_cache, newdir);
		if (error != 0) {
			if (doingdirectory && newparent) {
				tdp->i_nlink--;
				DIP_ASSIGN(tdp, nlink, tdp->i_nlink);
				tdp->i_flag |= IN_CHANGE;
				(void)UFS_UPDATE(tdvp, NULL, NULL,
				    UPDATE_WAIT | UPDATE_DIROP);
			}
			goto bad;
		}
		VN_KNOTE(tdvp, NOTE_WRITE);
	} else {
		if (txp->i_dev != tdp->i_dev || txp->i_dev != ip->i_dev)
			panic("rename: EXDEV");
		/*
		 * Short circuit rename(foo, foo).
		 */
		if (txp->i_number == ip->i_number)
			panic("rename: same file");
		/*
		 * If the parent directory is "sticky", then the user must
		 * own the parent directory, or the destination of the rename,
		 * otherwise the destination may not be changed (except by
		 * root).  This implements append-only directories.
		 */
		if ((tdp->i_mode & S_ISTXT) &&
		    kauth_authorize_generic(tcnp->cn_cred,
		     KAUTH_GENERIC_ISSUSER, NULL) != 0 &&
		    kauth_cred_geteuid(tcnp->cn_cred) != tdp->i_uid &&
		    txp->i_uid != kauth_cred_geteuid(tcnp->cn_cred)) {
			error = EPERM;
			goto bad;
		}
		/*
		 * Target must be empty if a directory and have no links
		 * to it.  Also, ensure source and target are compatible
		 * (both directories, or both not directories).
		 */
		if ((txp->i_mode & IFMT) == IFDIR) {
			if (txp->i_nlink > 2 ||
			    !ufs_dirempty(txp, tdp->i_number, tcnp->cn_cred)) {
				error = ENOTEMPTY;
				goto bad;
			}
			if (!doingdirectory) {
				error = ENOTDIR;
				goto bad;
			}
			cache_purge(tdvp);
		} else if (doingdirectory) {
			error = EISDIR;
			goto bad;
		}
		if ((error = ufs_dirrewrite(tdp, to_ulr.ulr_offset,
		    txp, ip->i_number,
		    IFTODT(ip->i_mode), doingdirectory && newparent ?
		    newparent : doingdirectory, IN_CHANGE | IN_UPDATE)) != 0)
			goto bad;
		if (doingdirectory) {
			/*
			 * Truncate inode.  The only stuff left in the
			 * directory is "." and "..".  The "." reference is
			 * inconsequential since we are quashing it.  We have
			 * removed the "." reference and the reference in the
			 * parent directory, but there may be other hard
			 * links.
			 */
			if (!newparent) {
				tdp->i_nlink--;
				DIP_ASSIGN(tdp, nlink, tdp->i_nlink);
				tdp->i_flag |= IN_CHANGE;
				UFS_WAPBL_UPDATE(tdvp, NULL, NULL, 0);
			}
			txp->i_nlink--;
			DIP_ASSIGN(txp, nlink, txp->i_nlink);
			txp->i_flag |= IN_CHANGE;
			if ((error = UFS_TRUNCATE(tvp, (off_t)0, IO_SYNC,
			    tcnp->cn_cred)))
				goto bad;
		}
		VN_KNOTE(tdvp, NOTE_WRITE);
		VN_KNOTE(tvp, NOTE_DELETE);
	}

	/*
	 * Handle case where the directory entry we need to remove,
	 * which is/was at from_ulr.ulr_offset, or the one before it,
	 * which is/was at from_ulr.ulr_offset - from_ulr.ulr_count,
	 * may have been moved when the directory insertion above
	 * performed compaction.
	 */
	if (tdp->i_number == fdp->i_number &&
	    ulr_overlap(&from_ulr, &to_ulr)) {

		struct buf *bp;
		struct direct *ep;
		struct ufsmount *ump = fdp->i_ump;
		doff_t curpos;
		doff_t endsearch;	/* offset to end directory search */
		uint32_t prev_reclen;
		int dirblksiz = ump->um_dirblksiz;
		const int needswap = UFS_MPNEEDSWAP(ump);
		u_long bmask;
		int namlen, entryoffsetinblock;
		char *dirbuf;

		bmask = fdvp->v_mount->mnt_stat.f_iosize - 1;

		/*
		 * The fcnp entry will be somewhere between the start of
		 * compaction (to_ulr.ulr_offset) and the original location
		 * (from_ulr.ulr_offset).
		 */
		curpos = to_ulr.ulr_offset;
		endsearch = from_ulr.ulr_offset + from_ulr.ulr_reclen;
		entryoffsetinblock = 0;

		/*
		 * Get the directory block containing the start of
		 * compaction.
		 */
		error = ufs_blkatoff(fdvp, (off_t)to_ulr.ulr_offset, &dirbuf,
		    &bp, false);
		if (error)
			goto bad;

		/*
		 * Keep existing ulr_count (length of previous record)
		 * for the case where compaction did not include the
		 * previous entry but started at the from-entry.
		 */
		prev_reclen = from_ulr.ulr_count;

		while (curpos < endsearch) {
			uint32_t reclen;

			/*
			 * If necessary, get the next directory block.
			 *
			 * dholland 7/13/11 to the best of my understanding
			 * this should never happen; compaction occurs only
			 * within single blocks.  I think.
			 */
			if ((curpos & bmask) == 0) {
				if (bp != NULL)
					brelse(bp, 0);
				error = ufs_blkatoff(fdvp, (off_t)curpos,
				    &dirbuf, &bp, false);
				if (error)
					goto bad;
				entryoffsetinblock = 0;
			}

			KASSERT(bp != NULL);
			ep = (struct direct *)(dirbuf + entryoffsetinblock);
			reclen = ufs_rw16(ep->d_reclen, needswap);

#if (BYTE_ORDER == LITTLE_ENDIAN)
			if (FSFMT(fdvp) && needswap == 0)
				namlen = ep->d_type;
			else
				namlen = ep->d_namlen;
#else
			if (FSFMT(fdvp) && needswap != 0)
				namlen = ep->d_type;
			else
				namlen = ep->d_namlen;
#endif
			if ((ep->d_ino != 0) &&
			    (ufs_rw32(ep->d_ino, needswap) != WINO) &&
			    (namlen == fcnp->cn_namelen) &&
			    memcmp(ep->d_name, fcnp->cn_nameptr, namlen) == 0) {
				from_ulr.ulr_reclen = reclen;
				break;
			}
			curpos += reclen;
			entryoffsetinblock += reclen;
			prev_reclen = reclen;
		}

		from_ulr.ulr_offset = curpos;
		from_ulr.ulr_count = prev_reclen;

		KASSERT(curpos <= endsearch);

		/*
		 * If ulr_offset points to start of a directory block,
		 * clear ulr_count so ufs_dirremove() doesn't try to
		 * merge free space over a directory block boundary.
		 */
		if ((from_ulr.ulr_offset & (dirblksiz - 1)) == 0)
			from_ulr.ulr_count = 0;

		brelse(bp, 0);
	}

	/*
	 * 3) Unlink the source.
	 */

#if 0
	/*
	 * Ensure that the directory entry still exists and has not
	 * changed while the new name has been entered.  If the source is
	 * a file then the entry may have been unlinked or renamed.  In
	 * either case there is no further work to be done.  If the source
	 * is a directory then it cannot have been rmdir'ed; The IRENAME
	 * flag ensures that it cannot be moved by another rename or removed
	 * by a rmdir.
	 */
#endif
	KASSERT(fxp == ip);

	/*
	 * If the source is a directory with a new parent, the link
	 * count of the old parent directory must be decremented and
	 * ".." set to point to the new parent.
	 */
	if (doingdirectory && newparent) {
		KASSERT(fdp != NULL);
		ufs_dirrewrite(fxp, mastertemplate.dot_reclen,
		    fdp, newparent, DT_DIR, 0, IN_CHANGE);
		cache_purge(fdvp);
	}
	error = ufs_dirremove(fdvp, &from_ulr,
	    fxp, fcnp->cn_flags, 0);
	fxp->i_flag &= ~IN_RENAME;

	VN_KNOTE(fvp, NOTE_RENAME);
	goto done;

 out:
	goto out2;

	/* exit routines from steps 1 & 2 */
 bad:
	if (doingdirectory)
		ip->i_flag &= ~IN_RENAME;
	ip->i_nlink--;
	DIP_ASSIGN(ip, nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	ip->i_flag &= ~IN_RENAME;
	UFS_WAPBL_UPDATE(fvp, NULL, NULL, 0);
 done:
	UFS_WAPBL_END(fdvp->v_mount);
 out2:
	/*
	 * clear IN_RENAME - some exit paths happen too early to go
	 * through the cleanup done in the "bad" case above, so we
	 * always do this mini-cleanup here.
	 */
	ip->i_flag &= ~IN_RENAME;

	VOP_UNLOCK(fdvp);
	if (tdvp != fdvp) {
		VOP_UNLOCK(tdvp);
	}
	VOP_UNLOCK(fvp);
	if (tvp && tvp != fvp) {
		VOP_UNLOCK(tvp);
	}

	vrele(fdvp);
	vrele(tdvp);
	vrele(fvp);
	if (tvp) {
		vrele(tvp);
	}

	fstrans_done(mp);
	if (marked) {
		UNMARK_VNODE(fdvp);
		UNMARK_VNODE(fvp);
		SET_ENDOP_REMOVE(fs, tdvp, tvp, "rename");
	}
	return (error);

 abort_withlocks:
	VOP_UNLOCK(fdvp);
	if (tdvp != fdvp) {
		VOP_UNLOCK(tdvp);
	}
	VOP_UNLOCK(fvp);
	if (tvp && tvp != fvp) {
		VOP_UNLOCK(tvp);
	}

 abort:
	VOP_ABORTOP(fdvp, fcnp); /* XXX, why not in NFS? */
	VOP_ABORTOP(tdvp, tcnp); /* XXX, why not in NFS? */
	vrele(tdvp);
	if (tvp) {
		vrele(tvp);
	}
	vrele(fdvp);
	if (fvp) {
		vrele(fvp);
	}
	if (marked) {
		UNMARK_VNODE(fdvp);
		UNMARK_VNODE(fvp);
		SET_ENDOP_REMOVE(fs, tdvp, tvp, "rename");
	}
	return (error);
}

/* XXX hack to avoid calling ITIMES in getattr */
int
lfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct vattr *vap = ap->a_vap;
	struct lfs *fs = ip->i_lfs;

	/*
	 * Copy from inode table
	 */
	vap->va_fsid = ip->i_dev;
	vap->va_fileid = ip->i_number;
	vap->va_mode = ip->i_mode & ~IFMT;
	vap->va_nlink = ip->i_nlink;
	vap->va_uid = ip->i_uid;
	vap->va_gid = ip->i_gid;
	vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
	vap->va_size = vp->v_size;
	vap->va_atime.tv_sec = ip->i_ffs1_atime;
	vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
	vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
	vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
	vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
	vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
	vap->va_flags = ip->i_flags;
	vap->va_gen = ip->i_gen;
	/* this doesn't belong here */
	if (vp->v_type == VBLK)
		vap->va_blocksize = BLKDEV_IOSIZE;
	else if (vp->v_type == VCHR)
		vap->va_blocksize = MAXBSIZE;
	else
		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
	vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
	vap->va_type = vp->v_type;
	vap->va_filerev = ip->i_modrev;
	return (0);
}

/*
 * Check to make sure the inode blocks won't choke the buffer
 * cache, then call ufs_setattr as usual.
 */
int
lfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	lfs_check(vp, LFS_UNUSED_LBN, 0);
	return ufs_setattr(v);
}
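/*
 * Log-wrap control, briefly: through lfs_fcntl() a user process (for
 * example a snapshot-style backup utility) can issue LFCNWRAPSTOP to
 * hold the log at its current position; lfs_wrapgo() below is the
 * matching release, reached via LFCNWRAPGO or implicitly when the
 * controlling descriptor is closed (see lfs_close()).
 */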
/*
 * Release the block we hold on lfs_newseg wrapping.  Called on file close,
 * or explicitly from LFCNWRAPGO.  Called with the interlock held.
 */
static int
lfs_wrapgo(struct lfs *fs, struct inode *ip, int waitfor)
{
	if (fs->lfs_stoplwp != curlwp)
		return EBUSY;

	fs->lfs_stoplwp = NULL;
	cv_signal(&fs->lfs_stopcv);

	KASSERT(fs->lfs_nowrap > 0);
	if (fs->lfs_nowrap <= 0) {
		return 0;
	}

	if (--fs->lfs_nowrap == 0) {
		log(LOG_NOTICE, "%s: re-enabled log wrap\n", fs->lfs_fsmnt);
		wakeup(&fs->lfs_wrappass);
		lfs_wakeup_cleaner(fs);
	}
	if (waitfor) {
		mtsleep(&fs->lfs_nextseg, PCATCH | PUSER, "segment",
		    0, &lfs_lock);
	}

	return 0;
}

/*
 * Close called.
 */
/* ARGSUSED */
int
lfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	if ((ip->i_number == ROOTINO || ip->i_number == LFS_IFILE_INUM) &&
	    fs->lfs_stoplwp == curlwp) {
		mutex_enter(&lfs_lock);
		log(LOG_NOTICE, "lfs_close: releasing log wrap control\n");
		lfs_wrapgo(fs, ip, 0);
		mutex_exit(&lfs_lock);
	}

	if (vp == ip->i_lfs->lfs_ivnode &&
	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
		return 0;

	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (0);
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the inode then do device close.
 */
int
lfsspec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the inode then do device close.
 */
int
lfsfifo_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ap->a_vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}
/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
lfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	int error;

	/*
	 * The inode must be freed and updated before being removed
	 * from its hash chain.  Other threads trying to gain a hold
	 * on the inode will be stalled because it is locked (VI_XLOCK).
	 */
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
		lfs_vfree(vp, ip->i_number, ip->i_omode);

	mutex_enter(&lfs_lock);
	LFS_CLR_UINO(ip, IN_ALLMOD);
	mutex_exit(&lfs_lock);
	if ((error = ufs_reclaim(vp)))
		return (error);

	/*
	 * Take us off the paging and/or dirop queues if we were on them.
	 * We shouldn't be on them.
	 */
	mutex_enter(&lfs_lock);
	if (ip->i_flags & IN_PAGING) {
		log(LOG_WARNING, "%s: reclaimed vnode is IN_PAGING\n",
		    fs->lfs_fsmnt);
		ip->i_flags &= ~IN_PAGING;
		TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
	}
	if (vp->v_uflag & VU_DIROP) {
		panic("reclaimed vnode is VU_DIROP");
		vp->v_uflag &= ~VU_DIROP;
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
	}
	mutex_exit(&lfs_lock);

	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
	lfs_deregister_all(vp);
	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	ip->inode_ext.lfs = NULL;
	genfs_node_destroy(vp);
	pool_put(&lfs_inode_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Read a block from a storage device.
 * In order to avoid reading blocks that are in the process of being
 * written by the cleaner---and hence are not mutexed by the normal
 * buffer cache / page cache mechanisms---check for collisions before
 * reading.
 *
 * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
 * the active cleaner test.
 *
 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
 */
int
lfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	struct inode *ip;
	daddr_t tbn;
#define MAXLOOP 25
	int i, sn, error, slept, loopcount;

	bp = ap->a_bp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;

	/* lfs uses its strategy routine only for read */
	KASSERT(bp->b_flags & B_READ);

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("lfs_strategy: spec");
	KASSERT(bp->b_bcount != 0);
	if (bp->b_blkno == bp->b_lblkno) {
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL);
		if (error) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			return (error);
		}
		if ((long)bp->b_blkno == -1) /* no valid data */
			clrbuf(bp);
	}
	if ((long)bp->b_blkno < 0) { /* block is not on disk */
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	slept = 1;
	loopcount = 0;
	mutex_enter(&lfs_lock);
	while (slept && fs->lfs_seglock) {
		mutex_exit(&lfs_lock);
		/*
		 * Look through list of intervals.
		 * There will only be intervals to look through
		 * if the cleaner holds the seglock.
		 * Since the cleaner is synchronous, we can trust
		 * the list of intervals to be current.
		 */
		tbn = dbtofsb(fs, bp->b_blkno);
		sn = dtosn(fs, tbn);
		slept = 0;
		for (i = 0; i < fs->lfs_cleanind; i++) {
			if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
			    tbn >= fs->lfs_cleanint[i]) {
				DLOG((DLOG_CLEAN,
				    "lfs_strategy: ino %d lbn %" PRId64
				    " ind %d sn %d fsb %" PRIx32
				    " given sn %d fsb %" PRIx64 "\n",
				    ip->i_number, bp->b_lblkno, i,
				    dtosn(fs, fs->lfs_cleanint[i]),
				    fs->lfs_cleanint[i], sn, tbn));
				DLOG((DLOG_CLEAN,
				    "lfs_strategy: sleeping on ino %d lbn %"
				    PRId64 "\n", ip->i_number, bp->b_lblkno));
				mutex_enter(&lfs_lock);
				if (LFS_SEGLOCK_HELD(fs) && fs->lfs_iocount) {
					/*
					 * Cleaner can't wait for itself.
					 * Instead, wait for the blocks
					 * to be written to disk.
					 * XXX we need pribio in the test
					 * XXX here.
					 */
					mtsleep(&fs->lfs_iocount,
					    (PRIBIO + 1) | PNORELOCK,
					    "clean2", hz/10 + 1,
					    &lfs_lock);
					slept = 1;
					++loopcount;
					break;
				} else if (fs->lfs_seglock) {
					mtsleep(&fs->lfs_seglock,
					    (PRIBIO + 1) | PNORELOCK,
					    "clean1", 0,
					    &lfs_lock);
					slept = 1;
					break;
				}
				mutex_exit(&lfs_lock);
			}
		}
		mutex_enter(&lfs_lock);
		if (loopcount > MAXLOOP) {
			printf("lfs_strategy: breaking out of clean2 loop\n");
			break;
		}
	}
	mutex_exit(&lfs_lock);

	vp = ip->i_devvp;
	VOP_STRATEGY(vp, bp);
	return (0);
}
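/*
 * The interval test above, with made-up numbers: suppose the cleaner
 * registered fs->lfs_cleanint[0] = 5000 (an fsb address) and segments
 * are 8192 fsb each.  A read mapping to fsb 6000 falls in the same
 * segment at or past the interval start, so lfs_strategy() waits;
 * reads mapping to fsb 4000 (same segment, before the start) or fsb
 * 9000 (a different segment) proceed immediately.
 */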
/*
 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
 * Technically this is a checkpoint (the on-disk state is valid)
 * even though we are leaving out all the file data.
 */
int
lfs_flush_dirops(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int flags = 0;
	int error = 0;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(fs->lfs_nadirop == 0);

	if (fs->lfs_ronly)
		return EROFS;

	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return 0;
	} else
		mutex_exit(&lfs_lock);

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	lfs_imtime(fs);
	lfs_seglock(fs, flags);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to get dirops out of the way.
	 * Only write dirops, and don't flush files' pages, only
	 * blocks from the directories.
	 *
	 * We don't need to vref these files because they are
	 * dirops and so hold an extra reference until the
	 * segunlock clears them of that status.
	 *
	 * We don't need to check for IN_ADIROP because we know that
	 * no dirops are active.
	 */
	mutex_enter(&lfs_lock);
	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		mutex_exit(&lfs_lock);
		vp = ITOV(ip);

		KASSERT((ip->i_flag & IN_ADIROP) == 0);
		KASSERT(vp->v_uflag & VU_DIROP);
		KASSERT(!(vp->v_iflag & VI_XLOCK));

		/*
		 * All writes to directories come from dirops; all
		 * writes to files' direct blocks go through the page
		 * cache, which we're not touching.  Reads to files
		 * and/or directories will not be affected by writing
		 * directory blocks, directory inodes, and file inodes.
		 * So we don't really need to lock.
		 */
		if (vp->v_iflag & VI_XLOCK) {
			mutex_enter(&lfs_lock);
			continue;
		}
		/* XXX see below
		 * waslocked = VOP_ISLOCKED(vp);
		 */
		if (vp->v_type != VREG &&
		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
			error = lfs_writefile(fs, sp, vp);
			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
			    !(ip->i_flag & IN_ALLMOD)) {
				mutex_enter(&lfs_lock);
				LFS_SET_UINO(ip, IN_MODIFIED);
				mutex_exit(&lfs_lock);
			}
			if (error && (sp->seg_flags & SEGM_SINGLE)) {
				mutex_enter(&lfs_lock);
				error = EAGAIN;
				break;
			}
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		error = lfs_writeinode(fs, sp, ip);
		mutex_enter(&lfs_lock);
		if (error && (sp->seg_flags & SEGM_SINGLE)) {
			error = EAGAIN;
			break;
		}

		/*
		 * We might need to update these inodes again,
		 * for example, if they have data blocks to write.
		 * Make sure that after this flush, they are still
		 * marked IN_MODIFIED so that we don't forget to
		 * write them.
		 */
		/* XXX only for non-directories? --KS */
		LFS_SET_UINO(ip, IN_MODIFIED);
	}
	mutex_exit(&lfs_lock);
	/* We've written all the dirops there are */
	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
	lfs_finalize_fs_seguse(fs);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);

	return error;
}
/*
 * Flush all vnodes for which the pagedaemon has requested pageouts.
 * Skip over any files that are marked VU_DIROP (since lfs_flush_dirops()
 * has just run, this would be an error).  If we have to skip a vnode
 * for any reason, just skip it; if we have to wait for the cleaner,
 * abort.  The writer daemon will call us again later.
 */
int
lfs_flush_pchain(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int error, error2;

	ASSERT_NO_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_pchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return 0;
	} else
		mutex_exit(&lfs_lock);

	/* Get dirops out of the way */
	if ((error = lfs_flush_dirops(fs)) != 0)
		return error;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	/*
	 * Inline lfs_segwrite/lfs_writevnodes, but just for pageouts.
	 */
	lfs_imtime(fs);
	lfs_seglock(fs, 0);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to clear pageout requests.
	 * Only write non-dirop files that are in the pageout queue.
	 * We're very conservative about what we write; we want to be
	 * fast and async.
	 */
	mutex_enter(&lfs_lock);
    top:
	for (ip = TAILQ_FIRST(&fs->lfs_pchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_pchain);
		vp = ITOV(ip);

		if (!(ip->i_flags & IN_PAGING))
			goto top;

		mutex_enter(vp->v_interlock);
		if ((vp->v_iflag & VI_XLOCK) || (vp->v_uflag & VU_DIROP) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		if (vp->v_type != VREG) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		if (lfs_vref(vp))
			continue;
		mutex_exit(&lfs_lock);

		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_RETRY) != 0) {
			lfs_vunref(vp);
			mutex_enter(&lfs_lock);
			continue;
		}

		error = lfs_writefile(fs, sp, vp);
		if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
		    !(ip->i_flag & IN_ALLMOD)) {
			mutex_enter(&lfs_lock);
			LFS_SET_UINO(ip, IN_MODIFIED);
			mutex_exit(&lfs_lock);
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		error2 = lfs_writeinode(fs, sp, ip);

		VOP_UNLOCK(vp);
		lfs_vunref(vp);

		if (error == EAGAIN || error2 == EAGAIN) {
			lfs_writeseg(fs, sp);
			mutex_enter(&lfs_lock);
			break;
		}
		mutex_enter(&lfs_lock);
	}
	mutex_exit(&lfs_lock);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);

	return 0;
}
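/*
 * A rough sketch of the flush pipeline feeding the two routines above
 * (descriptive only; the queue and daemon are those used in this file):
 *
 *	pagedaemon --> lfs_putpages() marks the inode IN_PAGING,
 *		       queues it on fs->lfs_pchainhd and wakes the
 *		       writer daemon
 *	writer daemon --> lfs_flush_pchain()
 *			  --> lfs_flush_dirops() first (dirop blocks),
 *			  then the pageout-queued regular files.
 */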
/*
 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
 */
int
lfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		void * a_data;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct timeval tv;
	struct timeval *tvp;
	BLOCK_INFO *blkiov;
	CLEANERINFO *cip;
	SEGUSE *sup;
	int blkcnt, error, oclean;
	size_t fh_size;
	struct lfs_fcntl_markv blkvp;
	struct lwp *l;
	fsid_t *fsidp;
	struct lfs *fs;
	struct buf *bp;
	fhandle_t *fhp;
	daddr_t off;

	/* Only respect LFS fcntls on fs root or Ifile */
	if (VTOI(ap->a_vp)->i_number != ROOTINO &&
	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
		return ufs_fcntl(v);
	}

	/* Avoid locking a draining lock */
	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
		return ESHUTDOWN;
	}

	/* LFS control and monitoring fcntls are available only to root */
	l = curlwp;
	if (((ap->a_command & 0xff00) >> 8) == 'L' &&
	    (error = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
	     NULL)) != 0)
		return (error);

	fs = VTOI(ap->a_vp)->i_lfs;
	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;

	error = 0;
	switch ((int)ap->a_command) {
	case LFCNSEGWAITALL_COMPAT_50:
	case LFCNSEGWAITALL_COMPAT:
		fsidp = NULL;
		/* FALLTHROUGH */
	case LFCNSEGWAIT_COMPAT_50:
	case LFCNSEGWAIT_COMPAT:
	    {
		struct timeval50 *tvp50
			= (struct timeval50 *)ap->a_data;
		timeval50_to_timeval(tvp50, &tv);
		tvp = &tv;
	    }
		goto segwait_common;
	case LFCNSEGWAITALL:
		fsidp = NULL;
		/* FALLTHROUGH */
	case LFCNSEGWAIT:
		tvp = (struct timeval *)ap->a_data;
segwait_common:
		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);

		error = lfs_segwait(fsidp, tvp);

		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		return error;

	case LFCNBMAPV:
	case LFCNMARKV:
		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;

		blkcnt = blkvp.blkcnt;
		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
			return (EINVAL);
		blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
		if ((error = copyin(blkvp.blkiov, blkiov,
		     blkcnt * sizeof(BLOCK_INFO))) != 0) {
			lfs_free(fs, blkiov, LFS_NB_BLKIOV);
			return error;
		}

		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);
		if (ap->a_command == LFCNBMAPV)
			error = lfs_bmapv(l->l_proc, fsidp, blkiov, blkcnt);
		else /* LFCNMARKV */
			error = lfs_markv(l->l_proc, fsidp, blkiov, blkcnt);
		if (error == 0)
			error = copyout(blkiov, blkvp.blkiov,
					blkcnt * sizeof(BLOCK_INFO));
		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		lfs_free(fs, blkiov, LFS_NB_BLKIOV);
		return error;

	case LFCNRECLAIM:
		/*
		 * Flush dirops and write Ifile, allowing empty segments
		 * to be immediately reclaimed.
		 */
		lfs_writer_enter(fs, "pndirop");
		off = fs->lfs_offset;
		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
		lfs_flush_dirops(fs);
		LFS_CLEANERINFO(cip, fs, bp);
		oclean = cip->clean;
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
		fs->lfs_sp->seg_flags |= SEGM_PROT;
		lfs_segunlock(fs);
		lfs_writer_leave(fs);

#ifdef DEBUG
		LFS_CLEANERINFO(cip, fs, bp);
		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
		      " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
		      fs->lfs_offset - off, cip->clean - oclean,
		      fs->lfs_activesb));
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
#endif

		return 0;

	case LFCNIFILEFH_COMPAT:
		/* Return the filehandle of the Ifile */
		if ((error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_FILEHANDLE, 0, NULL, NULL, NULL)) != 0)
			return (error);
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = 16;	/* former VFS_MAXFIDSIZ */
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);

	case LFCNIFILEFH_COMPAT2:
	case LFCNIFILEFH:
		/* Return the filehandle of the Ifile */
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = sizeof(struct lfs_fhandle) -
		    offsetof(fhandle_t, fh_fid);
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);

	case LFCNREWIND:
		/* Move lfs_offset to the lowest-numbered segment */
		return lfs_rewind(fs, *(int *)ap->a_data);

	case LFCNINVAL:
		/* Mark a segment SEGUSE_INVAL */
		LFS_SEGENTRY(sup, fs, *(int *)ap->a_data, bp);
		if (sup->su_nbytes > 0) {
			brelse(bp, 0);
			lfs_unset_inval_all(fs);
			return EBUSY;
		}
		sup->su_flags |= SEGUSE_INVAL;
		VOP_BWRITE(bp->b_vp, bp);
		return 0;

	case LFCNRESIZE:
		/* Resize the filesystem */
		return lfs_resize_fs(fs, *(int *)ap->a_data);

	case LFCNWRAPSTOP:
	case LFCNWRAPSTOP_COMPAT:
		/*
		 * Hold lfs_newseg at segment 0; if requested, sleep until
		 * the filesystem wraps around.  This is to support external
		 * agents (dump, fsck-based regression test) that need to
		 * look at a snapshot of the filesystem without necessarily
		 * requiring that all fs activity stop.
		 */
		if (fs->lfs_stoplwp == curlwp)
			return EALREADY;

		mutex_enter(&lfs_lock);
		while (fs->lfs_stoplwp != NULL)
			cv_wait(&fs->lfs_stopcv, &lfs_lock);
		fs->lfs_stoplwp = curlwp;
		if (fs->lfs_nowrap == 0)
			log(LOG_NOTICE, "%s: disabled log wrap\n", fs->lfs_fsmnt);
		++fs->lfs_nowrap;
		if (*(int *)ap->a_data == 1
		    || ap->a_command == LFCNWRAPSTOP_COMPAT) {
			log(LOG_NOTICE, "LFCNWRAPSTOP waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
				"segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNWRAPSTOP done waiting\n");
			if (error) {
				lfs_wrapgo(fs, VTOI(ap->a_vp), 0);
			}
		}
		mutex_exit(&lfs_lock);
		return 0;

	case LFCNWRAPGO:
	case LFCNWRAPGO_COMPAT:
		/*
		 * Having done its work, the agent wakes up the writer.
		 * If the argument is 1, it sleeps until a new segment
		 * is selected.
		 */
		mutex_enter(&lfs_lock);
		error = lfs_wrapgo(fs, VTOI(ap->a_vp),
				   ap->a_command == LFCNWRAPGO_COMPAT ? 1 :
				   *((int *)ap->a_data));
		mutex_exit(&lfs_lock);
		return error;

	case LFCNWRAPPASS:
		if ((VTOI(ap->a_vp)->i_lfs_iflags & LFSI_WRAPWAIT))
			return EALREADY;
		mutex_enter(&lfs_lock);
		if (fs->lfs_stoplwp != curlwp) {
			mutex_exit(&lfs_lock);
			return EALREADY;
		}
		if (fs->lfs_nowrap == 0) {
			mutex_exit(&lfs_lock);
			return EBUSY;
		}
		fs->lfs_wrappass = 1;
		wakeup(&fs->lfs_wrappass);
		/* Wait for the log to wrap, if asked */
		if (*(int *)ap->a_data) {
			mutex_enter(ap->a_vp->v_interlock);
			if (lfs_vref(ap->a_vp) != 0)
				panic("LFCNWRAPPASS: lfs_vref failed");
			VTOI(ap->a_vp)->i_lfs_iflags |= LFSI_WRAPWAIT;
			log(LOG_NOTICE, "LFCNWRAPPASS waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
				"segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNWRAPPASS done waiting\n");
			VTOI(ap->a_vp)->i_lfs_iflags &= ~LFSI_WRAPWAIT;
			lfs_vunref(ap->a_vp);
		}
		mutex_exit(&lfs_lock);
		return error;

	case LFCNWRAPSTATUS:
		mutex_enter(&lfs_lock);
		*(int *)ap->a_data = fs->lfs_wrapstatus;
		mutex_exit(&lfs_lock);
		return 0;

	default:
		return ufs_fcntl(v);
	}
	return 0;
}
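/*
 * Example (illustrative only, not compiled into the kernel): a
 * cleaner-like userland program might drive the fcntls above roughly
 * as follows.  "rootfd" and the error handling are hypothetical;
 * LFCNSEGWAIT and the struct timeval argument are as handled in the
 * cases above.
 *
 *	#include <fcntl.h>
 *	#include <sys/time.h>
 *
 *	struct timeval tv = { 5, 0 };	// bound the wait
 *	if (fcntl(rootfd, LFCNSEGWAIT, &tv) == -1)
 *		warn("LFCNSEGWAIT");	// timed out or interrupted
 *	// ... examine segment usage, build a BLOCK_INFO array,
 *	// then rewrite the live blocks with LFCNMARKV ...
 */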
int
lfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
	    (ap->a_access_type & VM_PROT_WRITE) != 0) {
		return EPERM;
	}
	if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
		mutex_enter(&lfs_lock);
		LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
		mutex_exit(&lfs_lock);
	}

	/*
	 * We're relying on the fact that genfs_getpages() always reads in
	 * entire filesystem blocks.
	 */
	return genfs_getpages(v);
}

/*
 * Wait for a page to become unbusy, possibly printing diagnostic messages
 * as well.
 *
 * Called with vp->v_interlock held; return with it held.
 */
static void
wait_for_page(struct vnode *vp, struct vm_page *pg, const char *label)
{
	KASSERT(mutex_owned(vp->v_interlock));
	if ((pg->flags & PG_BUSY) == 0)
		return;		/* Nothing to wait for! */

#if defined(DEBUG) && defined(UVM_PAGE_TRKOWN)
	static struct vm_page *lastpg;

	if (label != NULL && pg != lastpg) {
		if (pg->owner_tag) {
			printf("lfs_putpages[%d.%d]: %s: page %p owner %d.%d [%s]\n",
			       curproc->p_pid, curlwp->l_lid, label,
			       pg, pg->owner, pg->lowner, pg->owner_tag);
		} else {
			printf("lfs_putpages[%d.%d]: %s: page %p unowned?!\n",
			       curproc->p_pid, curlwp->l_lid, label, pg);
		}
	}
	lastpg = pg;
#endif

	pg->flags |= PG_WANTED;
	UVM_UNLOCK_AND_WAIT(pg, vp->v_interlock, 0, "lfsput", 0);
	mutex_enter(vp->v_interlock);
}
/*
 * This routine is called by lfs_putpages() when it can't complete the
 * write because a page is busy.  This means that either (1) someone,
 * possibly the pagedaemon, is looking at this page, and will give it up
 * presently; or (2) we ourselves are holding the page busy in the
 * process of being written (either gathered or actually on its way to
 * disk).  We don't need to give up the segment lock, but we might need
 * to call lfs_writeseg() to expedite the page's journey to disk.
 *
 * Called with vp->v_interlock held; return with it held.
 */
/* #define BUSYWAIT */
static void
write_and_wait(struct lfs *fs, struct vnode *vp, struct vm_page *pg,
	       int seglocked, const char *label)
{
	KASSERT(mutex_owned(vp->v_interlock));
#ifndef BUSYWAIT
	struct inode *ip = VTOI(vp);
	struct segment *sp = fs->lfs_sp;
	int count = 0;

	if (pg == NULL)
		return;

	while (pg->flags & PG_BUSY &&
	    pg->uobject == &vp->v_uobj) {
		mutex_exit(vp->v_interlock);
		if (sp->cbpp - sp->bpp > 1) {
			/* Write gathered pages */
			lfs_updatemeta(sp);
			lfs_release_finfo(fs);
			(void) lfs_writeseg(fs, sp);

			/*
			 * Reinitialize FIP
			 */
			KASSERT(sp->vp == vp);
			lfs_acquire_finfo(fs, ip->i_number,
					  ip->i_gen);
		}
		++count;
		mutex_enter(vp->v_interlock);
		wait_for_page(vp, pg, label);
	}
	if (label != NULL && count > 1) {
		DLOG((DLOG_PAGE, "lfs_putpages[%d]: %s: %sn = %d\n",
		      curproc->p_pid, label, (count > 0 ? "looping, " : ""),
		      count));
	}
#else
	preempt();
#endif
	KASSERT(mutex_owned(vp->v_interlock));
}
/*
 * Make sure that for all pages in every block in the given range,
 * either all are dirty or all are clean.  If any of the pages
 * we've seen so far are dirty, put the vnode on the paging chain,
 * and mark it IN_PAGING.
 *
 * If checkfirst != 0, don't check all the pages but return at the
 * first dirty page.
 */
static int
check_dirty(struct lfs *fs, struct vnode *vp,
	    off_t startoffset, off_t endoffset, off_t blkeof,
	    int flags, int checkfirst, struct vm_page **pgp)
{
	int by_list;
	struct vm_page *curpg = NULL; /* XXX: gcc */
	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
	off_t soff = 0; /* XXX: gcc */
	voff_t off;
	int i;
	int nonexistent;
	int any_dirty;	/* number of dirty pages */
	int dirty;	/* number of dirty pages in a block */
	int tdirty;
	int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
	int pagedaemon = (curlwp == uvm.pagedaemon_lwp);

	KASSERT(mutex_owned(vp->v_interlock));
	ASSERT_MAYBE_SEGLOCK(fs);
  top:
	by_list = (vp->v_uobj.uo_npages <=
		   ((endoffset - startoffset) >> PAGE_SHIFT) *
		   UVM_PAGE_TREE_PENALTY);
	any_dirty = 0;

	if (by_list) {
		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
	} else {
		soff = startoffset;
	}
	while (by_list || soff < MIN(blkeof, endoffset)) {
		if (by_list) {
			/*
			 * Find the first page in a block.  Skip
			 * blocks outside our area of interest or beyond
			 * the end of file.
			 */
			KASSERT(curpg == NULL
			    || (curpg->flags & PG_MARKER) == 0);
			if (pages_per_block > 1) {
				while (curpg &&
				    ((curpg->offset & fs->lfs_bmask) ||
				     curpg->offset >= vp->v_size ||
				     curpg->offset >= endoffset)) {
					curpg = TAILQ_NEXT(curpg, listq.queue);
					KASSERT(curpg == NULL ||
					    (curpg->flags & PG_MARKER) == 0);
				}
			}
			if (curpg == NULL)
				break;
			soff = curpg->offset;
		}

		/*
		 * Mark all pages in extended range busy; find out if any
		 * of them are dirty.
		 */
		nonexistent = dirty = 0;
		for (i = 0; i == 0 || i < pages_per_block; i++) {
			KASSERT(mutex_owned(vp->v_interlock));
			if (by_list && pages_per_block <= 1) {
				pgs[i] = pg = curpg;
			} else {
				off = soff + (i << PAGE_SHIFT);
				pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
				if (pg == NULL) {
					++nonexistent;
					continue;
				}
			}
			KASSERT(pg != NULL);

			/*
			 * If we're holding the segment lock, we can deadlock
			 * against a process that has our page and is waiting
			 * for the cleaner, while the cleaner waits for the
			 * segment lock.  Just bail in that case.
			 */
			if ((pg->flags & PG_BUSY) &&
			    (pagedaemon || LFS_SEGLOCK_HELD(fs))) {
				if (i > 0)
					uvm_page_unbusy(pgs, i);
				DLOG((DLOG_PAGE, "lfs_putpages: avoiding 3-way or pagedaemon deadlock\n"));
				if (pgp)
					*pgp = pg;
				KASSERT(mutex_owned(vp->v_interlock));
				return -1;
			}

			while (pg->flags & PG_BUSY) {
				wait_for_page(vp, pg, NULL);
				KASSERT(mutex_owned(vp->v_interlock));
				if (i > 0)
					uvm_page_unbusy(pgs, i);
				KASSERT(mutex_owned(vp->v_interlock));
				goto top;
			}
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "lfs_putpages");

			pmap_page_protect(pg, VM_PROT_NONE);
			tdirty = (pmap_clear_modify(pg) ||
				  (pg->flags & PG_CLEAN) == 0);
			dirty += tdirty;
		}
		if (pages_per_block > 0 && nonexistent >= pages_per_block) {
			if (by_list) {
				curpg = TAILQ_NEXT(curpg, listq.queue);
			} else {
				soff += fs->lfs_bsize;
			}
			continue;
		}

		any_dirty += dirty;
		KASSERT(nonexistent == 0);
		KASSERT(mutex_owned(vp->v_interlock));

		/*
		 * If any are dirty, make all dirty; unbusy them,
		 * but if we were asked to clean, wire them so that
		 * the pagedaemon doesn't bother us about them while
		 * they're on their way to disk.
		 */
		for (i = 0; i == 0 || i < pages_per_block; i++) {
			KASSERT(mutex_owned(vp->v_interlock));
			pg = pgs[i];
			KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
			KASSERT(pg->flags & PG_BUSY);
			if (dirty) {
				pg->flags &= ~PG_CLEAN;
				if (flags & PGO_FREE) {
					/*
					 * Wire the page so that
					 * pdaemon doesn't see it again.
					 */
					mutex_enter(&uvm_pageqlock);
					uvm_pagewire(pg);
					mutex_exit(&uvm_pageqlock);

					/* Suspended write flag */
					pg->flags |= PG_DELWRI;
				}
			}
			if (pg->flags & PG_WANTED)
				wakeup(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}

		if (checkfirst && any_dirty)
			break;

		if (by_list) {
			curpg = TAILQ_NEXT(curpg, listq.queue);
		} else {
			soff += MAX(PAGE_SIZE, fs->lfs_bsize);
		}
	}

	KASSERT(mutex_owned(vp->v_interlock));
	return any_dirty;
}
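/*
 * Worked example of the all-or-nothing rule above (illustrative
 * numbers): with lfs_bsize = 8192 and PAGE_SIZE = 4096 we have
 * pages_per_block = 2.  If only the second page of a block was
 * dirtied, check_dirty() still clears PG_CLEAN on both pages, so the
 * segment writer always sees whole filesystem blocks and never has to
 * merge a half-dirty block with its on-disk copy.
 */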
/*
 * lfs_putpages functions like genfs_putpages except that
 *
 * (1) It needs to bounds-check the incoming requests to ensure that
 *     they are block-aligned; if they are not, expand the range and
 *     do the right thing in case, e.g., the requested range is clean
 *     but the expanded range is dirty.
 *
 * (2) It needs to explicitly send blocks to be written when it is done.
 *     If VOP_PUTPAGES is called without the seglock held, we simply take
 *     the seglock and let lfs_segunlock wait for us.
 *     XXX There might be a bad situation if we have to flush a vnode while
 *     XXX lfs_markv is in operation.  As of this writing we panic in this
 *     XXX case.
 *
 * Assumptions:
 *
 * (1) The caller does not hold any pages in this vnode busy.  If it does,
 *     there is a danger that when we expand the page range and busy the
 *     pages we will deadlock.
 *
 * (2) We are called with vp->v_interlock held; we must return with it
 *     released.
 *
 * (3) We don't absolutely have to free pages right away, provided that
 *     the request does not have PGO_SYNCIO.  When the pagedaemon gives
 *     us a request with PGO_FREE, we take the pages out of the paging
 *     queue and wake up the writer, which will handle freeing them for us.
 *
 *     We ensure that for any filesystem block, all pages for that
 *     block are either resident or not, even if those pages are higher
 *     than EOF; that means that we will be getting requests to free
 *     "unused" pages above EOF all the time, and should ignore them.
 *
 * (4) If we are called with PGO_LOCKED, the finfo array we are to write
 *     into has been set up for us by lfs_writefile.  If not, we will
 *     have to handle allocating and/or freeing an finfo entry.
 *
 * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
 */

/* How many times to loop before we should start to worry */
#define TOOMANY 4

int
lfs_putpages(void *v)
{
	int error;
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
	off_t off, max_endoffset;
	bool seglocked, sync, pagedaemon, reclaim;
	struct vm_page *pg, *busypg;
	UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);
	int oreclaim = 0;
	int donewriting = 0;
#ifdef DEBUG
	int debug_n_again, debug_n_dirtyclean;
#endif

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;
	sync = (ap->a_flags & PGO_SYNCIO) != 0;
	reclaim = (ap->a_flags & PGO_RECLAIM) != 0;
	pagedaemon = (curlwp == uvm.pagedaemon_lwp);

	KASSERT(mutex_owned(vp->v_interlock));

	/* Putpages does nothing for metadata. */
	if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
		mutex_exit(vp->v_interlock);
		return 0;
	}

	/*
	 * If there are no pages, don't do anything.
	 */
	if (vp->v_uobj.uo_npages == 0) {
		if (TAILQ_EMPTY(&vp->v_uobj.memq) &&
		    (vp->v_iflag & VI_ONWORKLST) &&
		    LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			vn_syncer_remove_from_worklist(vp);
		}
		mutex_exit(vp->v_interlock);

		/* Remove us from paging queue, if we were on it */
		mutex_enter(&lfs_lock);
		if (ip->i_flags & IN_PAGING) {
			ip->i_flags &= ~IN_PAGING;
			TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
		}
		mutex_exit(&lfs_lock);

		KASSERT(!mutex_owned(vp->v_interlock));
		return 0;
	}

	blkeof = blkroundup(fs, ip->i_size);

	/*
	 * Ignore requests to free pages past EOF but in the same block
	 * as EOF, unless the vnode is being reclaimed or the request
	 * is synchronous.  (If the request is sync, it comes from
	 * lfs_truncate.)
	 *
	 * To avoid being flooded with this request, make these pages
	 * look "active".
	 */
	if (!sync && !reclaim &&
	    ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
		origoffset = ap->a_offlo;
		for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
			pg = uvm_pagelookup(&vp->v_uobj, off);
			KASSERT(pg != NULL);
			while (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, vp->v_interlock, 0,
						    "lfsput2", 0);
				mutex_enter(vp->v_interlock);
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pageactivate(pg);
			mutex_exit(&uvm_pageqlock);
		}
		ap->a_offlo = blkeof;
		if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
			mutex_exit(vp->v_interlock);
			return 0;
		}
	}

	/*
	 * Extend page range to start and end at block boundaries.
	 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
	 */
	origoffset = ap->a_offlo;
	origendoffset = ap->a_offhi;
	startoffset = origoffset & ~(fs->lfs_bmask);
	max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
					     << fs->lfs_bshift;

	if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
		endoffset = max_endoffset;
		origendoffset = endoffset;
	} else {
		origendoffset = round_page(ap->a_offhi);
		endoffset = round_page(blkroundup(fs, origendoffset));
	}

	KASSERT(startoffset > 0 || endoffset >= startoffset);
	if (startoffset == endoffset) {
		/* Nothing to do, why were we called? */
		mutex_exit(vp->v_interlock);
		DLOG((DLOG_PAGE, "lfs_putpages: startoffset = endoffset = %"
		      PRId64 "\n", startoffset));
		return 0;
	}

	ap->a_offlo = startoffset;
	ap->a_offhi = endoffset;

	/*
	 * If not cleaning, just send the pages through genfs_putpages
	 * to be returned to the pool.
	 */
	if (!(ap->a_flags & PGO_CLEANIT)) {
		DLOG((DLOG_PAGE, "lfs_putpages: no cleanit vn %p ino %d (flags %x)\n",
		      vp, (int)ip->i_number, ap->a_flags));
		int r = genfs_putpages(v);
		KASSERT(!mutex_owned(vp->v_interlock));
		return r;
	}

	/* Set PGO_BUSYFAIL to avoid deadlocks */
	ap->a_flags |= PGO_BUSYFAIL;

	/*
	 * Likewise, if we are asked to clean but the pages are not
	 * dirty, we can just free them using genfs_putpages.
	 */
#ifdef DEBUG
	debug_n_dirtyclean = 0;
#endif
	do {
		int r;
		KASSERT(mutex_owned(vp->v_interlock));

		/* Count the number of dirty pages */
		r = check_dirty(fs, vp, startoffset, endoffset, blkeof,
				ap->a_flags, 1, NULL);
		if (r < 0) {
			/* Pages are busy with another process */
			mutex_exit(vp->v_interlock);
			return EDEADLK;
		}
		if (r > 0) /* Some pages are dirty */
			break;

		/*
		 * Sometimes pages are dirtied between the time that
		 * we check and the time we try to clean them.
		 * Instruct lfs_gop_write to return EDEADLK in this case
		 * so we can write them properly.
		 */
		ip->i_lfs_iflags |= LFSI_NO_GOP_WRITE;
		r = genfs_do_putpages(vp, startoffset, endoffset,
				      ap->a_flags & ~PGO_SYNCIO, &busypg);
		ip->i_lfs_iflags &= ~LFSI_NO_GOP_WRITE;
		if (r != EDEADLK) {
			KASSERT(!mutex_owned(vp->v_interlock));
			return r;
		}

		/* One of the pages was busy.  Start over. */
		mutex_enter(vp->v_interlock);
		wait_for_page(vp, busypg, "dirtyclean");
#ifdef DEBUG
		++debug_n_dirtyclean;
#endif
	} while (1);

#ifdef DEBUG
	if (debug_n_dirtyclean > TOOMANY)
		DLOG((DLOG_PAGE, "lfs_putpages: dirtyclean: looping, n = %d\n",
		      debug_n_dirtyclean));
#endif

	/*
	 * Dirty and asked to clean.
	 *
	 * Pagedaemon can't actually write LFS pages; wake up
	 * the writer to take care of that.  The writer will
	 * notice the pager inode queue and act on that.
	 *
	 * XXX We must drop the vp->interlock before taking the lfs_lock or we
	 * get a nasty deadlock with lfs_flush_pchain().
	 */
	if (pagedaemon) {
		mutex_exit(vp->v_interlock);
		mutex_enter(&lfs_lock);
		if (!(ip->i_flags & IN_PAGING)) {
			ip->i_flags |= IN_PAGING;
			TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
		}
		wakeup(&lfs_writer_daemon);
		mutex_exit(&lfs_lock);
		preempt();
		KASSERT(!mutex_owned(vp->v_interlock));
		return EWOULDBLOCK;
	}

	/*
	 * If this is a file created in a recent dirop, we can't flush its
	 * inode until the dirop is complete.  Drain dirops, then flush the
	 * filesystem (taking care of any other pending dirops while we're
	 * at it).
	 */
	if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
	    (vp->v_uflag & VU_DIROP)) {
		DLOG((DLOG_PAGE, "lfs_putpages: flushing VU_DIROP\n"));

		lfs_writer_enter(fs, "ppdirop");

		/* Note if we hold the vnode locked */
		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
		{
			DLOG((DLOG_PAGE, "lfs_putpages: dirop inode already locked\n"));
		} else {
			DLOG((DLOG_PAGE, "lfs_putpages: dirop inode not locked\n"));
		}
		mutex_exit(vp->v_interlock);

		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
		mutex_exit(&lfs_lock);

		mutex_enter(vp->v_interlock);
		lfs_writer_leave(fs);

		/* The flush will have cleaned out this vnode as well,
		   no need to do more to it. */
	}

	/*
	 * This is it.  We are going to write some pages.  From here on
	 * down it's all just mechanics.
	 *
	 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
	 */
	ap->a_flags &= ~PGO_SYNCIO;

	/*
	 * If we've already got the seglock, flush the node and return.
	 * The FIP has already been set up for us by lfs_writefile,
	 * and FIP cleanup and lfs_updatemeta will also be done there,
	 * unless genfs_putpages returns EDEADLK; then we must flush
	 * what we have, and correct FIP and segment header accounting.
	 */
  get_seglock:
	/*
	 * If we are not called with the segment locked, lock it.
	 * Account for a new FIP in the segment header, and set sp->vp.
	 * (This should duplicate the setup at the top of lfs_writefile().)
	 */
	seglocked = (ap->a_flags & PGO_LOCKED) != 0;
	if (!seglocked) {
		mutex_exit(vp->v_interlock);
		error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
		if (error != 0) {
			KASSERT(!mutex_owned(vp->v_interlock));
			return error;
		}
		mutex_enter(vp->v_interlock);
		lfs_acquire_finfo(fs, ip->i_number, ip->i_gen);
	}
	sp = fs->lfs_sp;
	KASSERT(sp->vp == NULL);
	sp->vp = vp;

	/* Note segments written by reclaim; only for debugging */
	if ((vp->v_iflag & VI_XLOCK) != 0) {
		sp->seg_flags |= SEGM_RECLAIM;
		fs->lfs_reclino = ip->i_number;
	}

	/*
	 * Ensure that the partial segment is marked SS_DIROP if this
	 * vnode is a DIROP.
	 */
	if (!seglocked && vp->v_uflag & VU_DIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	/*
	 * Loop over genfs_putpages until all pages are gathered.
	 * genfs_putpages() drops the interlock, so reacquire it if necessary.
	 * Whenever we lose the interlock we have to rerun check_dirty, as
	 * well, since more pages might have been dirtied in our absence.
	 */
#ifdef DEBUG
	debug_n_again = 0;
#endif
	do {
		busypg = NULL;
		KASSERT(mutex_owned(vp->v_interlock));
		if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
				ap->a_flags, 0, &busypg) < 0) {
			mutex_exit(vp->v_interlock);
			/* XXX why? --ks */
			mutex_enter(vp->v_interlock);
			write_and_wait(fs, vp, busypg, seglocked, NULL);
			if (!seglocked) {
				mutex_exit(vp->v_interlock);
				lfs_release_finfo(fs);
				lfs_segunlock(fs);
				mutex_enter(vp->v_interlock);
			}
			sp->vp = NULL;
			goto get_seglock;
		}

		busypg = NULL;
		KASSERT(!mutex_owned(&uvm_pageqlock));
		oreclaim = (ap->a_flags & PGO_RECLAIM);
		ap->a_flags &= ~PGO_RECLAIM;
		error = genfs_do_putpages(vp, startoffset, endoffset,
					  ap->a_flags, &busypg);
		ap->a_flags |= oreclaim;

		if (error == EDEADLK || error == EAGAIN) {
			DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
			      " %d ino %d off %x (seg %d)\n", error,
			      ip->i_number, fs->lfs_offset,
			      dtosn(fs, fs->lfs_offset)));

			if (oreclaim) {
				mutex_enter(vp->v_interlock);
				write_and_wait(fs, vp, busypg, seglocked, "again");
				mutex_exit(vp->v_interlock);
			} else {
				if ((sp->seg_flags & SEGM_SINGLE) &&
				    fs->lfs_curseg != fs->lfs_startseg)
					donewriting = 1;
			}
		} else if (error) {
			DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
			      " %d ino %d off %x (seg %d)\n", error,
			      (int)ip->i_number, fs->lfs_offset,
			      dtosn(fs, fs->lfs_offset)));
		}
		/* genfs_do_putpages loses the interlock */
#ifdef DEBUG
		++debug_n_again;
#endif
		if (oreclaim && error == EAGAIN) {
			DLOG((DLOG_PAGE, "vp %p ino %d vi_flags %x a_flags %x avoiding vclean panic\n",
			      vp, (int)ip->i_number, vp->v_iflag, ap->a_flags));
			mutex_enter(vp->v_interlock);
		}
		if (error == EDEADLK)
			mutex_enter(vp->v_interlock);
	} while (error == EDEADLK || (oreclaim && error == EAGAIN));
#ifdef DEBUG
	if (debug_n_again > TOOMANY)
		DLOG((DLOG_PAGE, "lfs_putpages: again: looping, n = %d\n", debug_n_again));
#endif

	KASSERT(sp != NULL && sp->vp == vp);
	if (!seglocked && !donewriting) {
		sp->vp = NULL;

		/* Write indirect blocks as well */
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);

		KASSERT(sp->vp == NULL);
		sp->vp = vp;
	}

	/*
	 * Blocks are now gathered into a segment waiting to be written.
	 * All that's left to do is update metadata, and write them.
	 */
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;

	/*
	 * If we were called from lfs_writefile, we don't need to clean up
	 * the FIP or unlock the segment lock.  We're done.
	 */
	if (seglocked) {
		KASSERT(!mutex_owned(vp->v_interlock));
		return error;
	}

	/* Clean up FIP and send it to disk. */
	lfs_release_finfo(fs);
	lfs_writeseg(fs, fs->lfs_sp);

	/*
	 * Remove us from paging queue if we wrote all our pages.
	 */
	if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
		mutex_enter(&lfs_lock);
		if (ip->i_flags & IN_PAGING) {
			ip->i_flags &= ~IN_PAGING;
			TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
		}
		mutex_exit(&lfs_lock);
	}

	/*
	 * XXX - with the malloc/copy writeseg, the pages are freed by now
	 * even if we don't wait (e.g. if we hold a nested lock).  This
	 * will not be true if we stop using malloc/copy.
	 */
	KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
	lfs_segunlock(fs);

	/*
	 * Wait for v_numoutput to drop to zero.  The seglock should
	 * take care of this, but there is a slight possibility that
	 * aiodoned might not have got around to our buffers yet.
	 */
	if (sync) {
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0) {
			DLOG((DLOG_PAGE, "lfs_putpages: ino %d sleeping on"
			      " num %d\n", ip->i_number, vp->v_numoutput));
			cv_wait(&vp->v_cv, vp->v_interlock);
		}
		mutex_exit(vp->v_interlock);
	}
	KASSERT(!mutex_owned(vp->v_interlock));
	return error;
}

/*
 * Return the last logical file offset that should be written for this file
 * if we're doing a write that ends at "size".  If writing, we need to know
 * about sizes on disk, i.e. fragments if there are any; if reading, we need
 * to know about entire blocks.
 */
void
lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	daddr_t olbn, nlbn;

	olbn = lblkno(fs, ip->i_size);
	nlbn = lblkno(fs, size);
	if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
		*eobp = fragroundup(fs, size);
	} else {
		*eobp = blkroundup(fs, size);
	}
}
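/*
 * Worked example for lfs_gop_size() (illustrative values only):
 * with lfs_bsize = 8192 and a 1024-byte fragment size, a write ending
 * at size = 5000 in the direct-block range gives
 * fragroundup(fs, 5000) = 5120 (the next fragment boundary), whereas
 * reading, or a block past the direct-block range, gives
 * blkroundup(fs, 5000) = 8192 (a whole block).
 */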
#ifdef DEBUG
void lfs_dump_vop(void *);

void
lfs_dump_vop(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;

#ifdef DDB
	vfs_vnode_print(ap->a_vp, 0, printf);
#endif
	lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
}
#endif

int
lfs_mmap(void *v)
{
	struct vop_mmap_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		vm_prot_t a_prot;
		kauth_cred_t a_cred;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
		return EOPNOTSUPP;
	return ufs_mmap(v);
}