vfs_vnops.c revision 275957
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/vfs_vnops.c 275957 2014-12-20 15:49:12Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}

/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	struct mount *mp;
	accmode_t accmode;
	struct flock lf;
	int error, have_flock, lock_flags, type;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				return (error);
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				return (error);
		}
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		return (error);

	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		KASSERT(fp != NULL, ("open with flock requires fp"));
		lock_flags = VOP_ISLOCKED(vp);
		VOP_UNLOCK(vp, 0);
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
		have_flock = (error == 0);
		vn_lock(vp, lock_flags | LK_RETRY);
		if (error == 0 && vp->v_iflag & VI_DOOMED)
			error = ENOENT;
		/*
		 * Another thread might have used this vnode as an
		 * executable while the vnode lock was dropped.
		 * Ensure the vnode is still able to be opened for
		 * writing after the lock has been obtained.
		 */
		if (error == 0 && accmode & VWRITE)
			error = vn_writechk(vp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			if (have_flock) {
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
				    F_FLOCK);
			}
			vn_start_write(vp, &mp, V_WAIT);
			vn_lock(vp, lock_flags | LK_RETRY);
			(void)VOP_CLOSE(vp, fmode, cred, td);
			vn_finished_write(mp);
			/* Prevent second close from fdrop()->vn_close(). */
			if (fp != NULL)
				fp->f_ops = &badfileops;
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
	if (fmode & FWRITE) {
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	}
	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (0);
}
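
/*
 * Illustrative sketch (not part of the original file): the canonical
 * in-kernel open/use/close sequence built from the primitives above.
 * The helper name example_open_close() and the read-only usage are
 * assumptions for illustration only.
 */
#if 0
static int
example_open_close(const char *path, struct thread *td)
{
	struct nameidata nd;
	int error, flags;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
	error = vn_open(&nd, &flags, 0, NULL);	/* returns with vnode locked */
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	VOP_UNLOCK(nd.ni_vp, 0);
	/* ... read from nd.ni_vp, e.g. via vn_rdwr() ... */
	error = vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
	return (error);
}
#endif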

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
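
/*
 * Worked example (illustrative, not from the original file): three
 * back-to-back 64KB reads at advancing offsets each add
 * howmany(65536, 16384) == 4 to f_seqcount, so the read-ahead hint
 * returned to the filesystem grows as f_seqcount << IO_SEQSHIFT until
 * it saturates at IO_SEQMAX << IO_SEQSHIFT.
 */
#if 0
	/* After open(): f_seqcount == 1. */
	read(fd, buf, 65536);	/* f_seqcount = 1 + 4 = 5  */
	read(fd, buf, 65536);	/* f_seqcount = 5 + 4 = 9  */
	read(fd, buf, 65536);	/* f_seqcount = 9 + 4 = 13 */
#endif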

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
 out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}
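
/*
 * Illustrative sketch (assumption, not from this file): reading the
 * first bytes of an already-referenced, unlocked vnode with vn_rdwr().
 * Passing ioflg 0 lets vn_rdwr() take the range lock and vnode lock
 * itself; a nonzero residual on success indicates a short read.
 */
#if 0
	ssize_t resid;
	char hdr[128];

	error = vn_rdwr(UIO_READ, vp, hdr, sizeof(hdr), 0, UIO_SYSSPACE,
	    0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid != 0)
		; /* short read: file is smaller than sizeof(hdr) */
#endif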

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * the long and shorter integer types reads are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}
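
/*
 * Illustrative pairing (assumption, not from this file): an fo_read-style
 * method that consumes and then advances the shared file offset.  Unless
 * FOF_OFFSET is set, the offset stays locked for the duration of the I/O,
 * so concurrent read(2) calls on the same struct file do not interleave.
 * do_the_io() is a hypothetical helper.
 */
#if 0
	foffset_lock_uio(fp, uio, flags);	/* load f_offset into uio */
	error = do_the_io(fp, uio);
	foffset_unlock_uio(fp, uio, flags);	/* store advanced offset */
#endif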

static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mtx *mtxp;
	int error, ioflag;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE read(2).  To optimize the common
		 * case of using POSIX_FADV_NOREUSE with sequential
		 * access, track the previous implicit DONTNEED
		 * request and grow this request to include the
		 * current read(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously read regions of the
		 * file.  This allows filesystem blocks that are
		 * accessed by multiple calls to read(2) to be flushed
		 * once the last read(2) finishes.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	struct mtx *mtxp;
	int error, ioflag, lock_flags;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE write(2).  To optimize the
		 * common case of using POSIX_FADV_NOREUSE with
		 * sequential access, track the previous implicit
		 * DONTNEED request and grow this request to include
		 * the current write(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously written regions of the
		 * file.
		 *
		 * Note that the blocks just written are almost
		 * certainly still dirty, so this only works when
		 * VOP_ADVISE() calls from subsequent writes push out
		 * the data written by this write(2) once the backing
		 * buffers are clean.  However, as compared to forcing
		 * IO_DIRECT, this gives much saner behavior.  Write
		 * clustering is still allowed, and clean pages are
		 * merely moved to the cache page queue rather than
		 * outright thrown away.  This means a subsequent
		 * read(2) can still avoid hitting the disk if the
		 * pages have not been reclaimed.
		 *
		 * This does make POSIX_FADV_NOREUSE largely useless
		 * with non-sequential access.  However, sequential
		 * access is the more common use case and the flag is
		 * merely advisory.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}

unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */

/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, save, saveheld, prev_td_ma_cnt;

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	save = vm_fault_disable_pagefaults();
	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use partial
		 * page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	free(uio_clone, M_IOV);
	return (error);
}

static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}

int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}
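
/*
 * Illustrative sketch (assumption, not from this file): the copy step
 * of a VOP_READ() implementation in a filesystem that sets MNTK_NO_IOPF.
 * The buffer variables bp/on/n are hypothetical; the point is simply to
 * call vn_io_fault_uiomove() where plain uiomove() would be used, so
 * the held pages in td_ma are honored whenever vn_io_fault() is active.
 */
#if 0
	error = vn_io_fault_uiomove((char *)bp->b_data + on, (int)n, uio);
#endif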

/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif

	error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef	DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
		vref(vp);

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error, mflags;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

 retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
	 * vfs_busy() reference is owned and caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}
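
/*
 * Illustrative pairing (assumption, not from this file): how a snapshot
 * or backup path might quiesce a filesystem around a critical section.
 */
#if 0
	error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
	if (error == 0) {
		/* ... all writers drained; take the snapshot ... */
		vfs_write_resume(mp, 0);
	}
#endif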

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0) {
			vn_start_write(NULL, &mp, V_WAIT);
			return (error);
		}
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating it to vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
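
/*
 * Illustrative sketch (assumption, not from this file): fetching a small
 * system-namespace attribute from an unlocked vnode.  The attribute name
 * "example" is hypothetical; on success, ablen is reduced to the number
 * of bytes actually returned.
 */
#if 0
	char abuf[64];
	int ablen = sizeof(abuf);

	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM, "example",
	    &ablen, abuf, td);
#endif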

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

static int
vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{

	return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
}

int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{

	return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
	    lkflags, rvp));
}

int
vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
    int lkflags, struct vnode **rvp)
{
	struct mount *mp;
	int ltype, error;

	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get");
	mp = vp->v_mount;
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("vn_vget_ino: vp not locked"));
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = alloc(mp, alloc_arg, lkflags, rvp);
	vfs_unbusy(mp);
	if (*rvp != vp)
		vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0) {
			if (*rvp == vp)
				vunref(vp);
			else
				vput(*rvp);
		}
		error = ENOENT;
	}
	return (error);
}
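
/*
 * Illustrative sketch (assumption, not from this file): the classic
 * consumer of vn_vget_ino() is a filesystem resolving ".." in its
 * VOP_LOOKUP(), where dvp is locked and parent_ino is a hypothetical
 * inode number.  The helper safely drops and reacquires the dvp lock
 * around VFS_VGET() while keeping the mount busied.
 */
#if 0
	error = vn_vget_ino(dvp, parent_ino, LK_EXCLUSIVE, &pvp);
#endif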

int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	PROC_LOCK(td->td_proc);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	PROC_UNLOCK(td->td_proc);
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size.  There is an implicit hole at the end of file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}
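
/*
 * Userspace view of the FIOSEEKHOLE/FIOSEEKDATA paths above
 * (illustrative, not part of this file): lseek(2) forwards SEEK_HOLE
 * and SEEK_DATA through vn_seek() below to the ioctls handled here.
 */
#if 0
	off_t data = lseek(fd, 0, SEEK_DATA);		/* first data region */
	off_t hole = lseek(fd, data, SEEK_HOLE);	/* end of that region */
#endif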

int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	*(off_t *)(td->td_retval) = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}