/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/vfs_vnops.c 276500 2015-01-01 10:44:20Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct 	fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}

/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		/*
		 * Set NOCACHE to avoid flushing the cache when
		 * rolling in many files at once.
		 */
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}
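
/*
 * Illustrative sketch, intentionally compiled out: a kernel consumer
 * opening a file by path through vn_open().  The path and the ENOEXEC
 * use are hypothetical.  vn_open() returns with the vnode locked, so
 * the sketch unlocks it before use and closes with vn_close().
 */
#if 0
static int
example_open_close(struct thread *td)
{
	struct nameidata nd;
	int error, flags;

	flags = FREAD;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/etc/motd", td);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);	/* free the pathname buffer */
	VOP_UNLOCK(nd.ni_vp, 0);
	/* ... read from nd.ni_vp, e.g. with vn_rdwr() ... */
	return (vn_close(nd.ni_vp, FREAD, td->td_ucred, td));
}
#endif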
/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	struct mount *mp;
	accmode_t accmode;
	struct flock lf;
	int error, have_flock, lock_flags, type;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				return (error);
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				return (error);
		}
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		return (error);

	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		KASSERT(fp != NULL, ("open with flock requires fp"));
		lock_flags = VOP_ISLOCKED(vp);
		VOP_UNLOCK(vp, 0);
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
		have_flock = (error == 0);
		vn_lock(vp, lock_flags | LK_RETRY);
		if (error == 0 && vp->v_iflag & VI_DOOMED)
			error = ENOENT;
		/*
		 * Another thread might have used this vnode as an
		 * executable while the vnode lock was dropped.
		 * Ensure the vnode is still able to be opened for
		 * writing after the lock has been obtained.
		 */
		if (error == 0 && accmode & VWRITE)
			error = vn_writechk(vp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			if (have_flock) {
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
				    F_FLOCK);
			}
			vn_start_write(vp, &mp, V_WAIT);
			vn_lock(vp, lock_flags | LK_RETRY);
			(void)VOP_CLOSE(vp, fmode, cred, td);
			vn_finished_write(mp);
			/* Prevent second close from fdrop()->vn_close(). */
			if (fp != NULL)
				fp->f_ops = &badfileops;
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
	if (fmode & FWRITE) {
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	}
	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (0);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) { 
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}
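
/*
 * Illustrative sketch, intentionally compiled out: reading the first
 * bytes of a vnode into a kernel buffer through vn_rdwr().  With ioflg
 * 0 the function takes the range lock and vnode lock itself; a
 * non-NULL aresid asks for a short read to be reported through it
 * instead of turning into EIO.  The ENOEXEC mapping is hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct thread *td)
{
	char buf[64];
	ssize_t resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
	    0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid != 0)
		error = ENOEXEC;	/* hypothetical: file too short */
	return (error);
}
#endif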
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * the long and shorter integer types reads are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}
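
/*
 * A minimal sketch, intentionally compiled out, of the f_offset
 * protocol used by fileops that are not passed FOF_OFFSET: take the
 * offset under FOFFSET_LOCKED, perform the i/o, then publish the
 * updated offset and wake any waiters.
 */
#if 0
static void
example_foffset(struct file *fp)
{
	off_t off;

	off = foffset_lock(fp, 0);	/* blocks other offset users */
	/* ... perform i/o that advances off ... */
	foffset_unlock(fp, off, 0);	/* stores f_offset, wakes waiters */
}
#endif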
static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mtx *mtxp;
	int error, ioflag;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE read(2).  To optimize the common
		 * case of using POSIX_FADV_NOREUSE with sequential
		 * access, track the previous implicit DONTNEED
		 * request and grow this request to include the
		 * current read(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously read regions of the
		 * file.  This allows filesystem blocks that are
		 * accessed by multiple calls to read(2) to be flushed
		 * once the last read(2) finishes.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	struct mtx *mtxp;
	int error, ioflag, lock_flags;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE write(2).  To optimize the
		 * common case of using POSIX_FADV_NOREUSE with
		 * sequential access, track the previous implicit
		 * DONTNEED request and grow this request to include
		 * the current write(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously written regions of the
		 * file.
		 *
		 * Note that the blocks just written are almost
		 * certainly still dirty, so this only works when
		 * VOP_ADVISE() calls from subsequent writes push out
		 * the data written by this write(2) once the backing
		 * buffers are clean.  However, as compared to forcing
		 * IO_DIRECT, this gives much saner behavior.  Write
		 * clustering is still allowed, and clean pages are
		 * merely moved to the cache page queue rather than
		 * outright thrown away.  This means a subsequent
		 * read(2) can still avoid hitting the disk if the
		 * pages have not been reclaimed.
		 *
		 * This does make POSIX_FADV_NOREUSE largely useless
		 * with non-sequential access.  However, sequential
		 * access is the more common use case and the flag is
		 * merely advisory.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}

unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then the system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * the VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */
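
/*
 * Sketch, intentionally compiled out, of a VOP_READ in a filesystem
 * whose mount sets MNTK_NO_IOPF: data is staged in a kernel buffer and
 * moved to userspace with vn_io_fault_uiomove(), which transparently
 * switches to the td_ma held-pages array when running under
 * vn_io_fault().  "myfs_read" and "myfs_fill" are hypothetical names;
 * myfs_fill stands in for the filesystem's backing-store read.
 */
#if 0
static int myfs_fill(struct vnode *vp, void *buf, ssize_t len, off_t off);

static int
myfs_read(struct vop_read_args *ap)
{
	struct uio *uio;
	char block[PAGE_SIZE];
	ssize_t xfer;
	int error;

	uio = ap->a_uio;
	error = 0;
	while (uio->uio_resid > 0 && error == 0) {
		xfer = MIN((ssize_t)sizeof(block), uio->uio_resid);
		/* Fill block[] from the backing store at uio_offset. */
		error = myfs_fill(ap->a_vp, block, xfer, uio->uio_offset);
		if (error == 0)
			error = vn_io_fault_uiomove(block, xfer, uio);
	}
	return (error);
}
#endif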
/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, save, saveheld, prev_td_ma_cnt;

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	save = vm_fault_disable_pagefaults();
	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use a partial
		 * page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	free(uio_clone, M_IOV);
	return (error);
}

static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that specified MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}

int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif

		error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}
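
/*
 * A small sketch, intentionally compiled out, of the two vn_lock()
 * idioms: without LK_RETRY the call fails with ENOENT on a doomed
 * vnode and the caller must check; with LK_RETRY it always succeeds,
 * possibly returning a doomed vnode.
 */
#if 0
static int
example_lock(struct vnode *vp)
{

	if (vn_lock(vp, LK_SHARED) != 0)
		return (ENOENT);	/* vnode was doomed meanwhile */
	/* ... shared-locked section ... */
	VOP_UNLOCK(vp, 0);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* never fails ... */
	if (vp->v_iflag & VI_DOOMED) {
		/* ... but the vnode may be dead; cope with it here. */
	}
	VOP_UNLOCK(vp, 0);
	return (0);
}
#endif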
/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
		vref(vp);

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error, mflags;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}
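
/*
 * Sketch, intentionally compiled out, of the canonical write bracket;
 * it mirrors the pattern used by vn_truncate() above.  A write
 * reference on the mount is obtained before the vnode lock, the
 * modification is performed, and the reference is dropped after the
 * lock is released.
 */
#if 0
static int
example_setattr(struct vnode *vp, struct vattr *vap, struct thread *td)
{
	struct mount *mp;
	int error;

	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_SETATTR(vp, vap, td->td_ucred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	return (error);
}
#endif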
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
	 * vfs_busy() reference is owned and caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount, 
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0) {
			vn_start_write(NULL, &mp, V_WAIT);
			return (error);
		}
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating them to a vnode operation.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
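
/*
 * Sketch, intentionally compiled out: fetching an extended attribute
 * into a stack buffer.  The attribute name "example.attr" is
 * hypothetical.  With ioflg 0 the wrapper locks the vnode itself; on
 * success, buflen is updated to the number of bytes actually read.
 */
#if 0
static int
example_get_attr(struct vnode *vp, struct thread *td)
{
	char buf[64];
	int buflen, error;

	buflen = sizeof(buf);
	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.attr", &buflen, buf, td);
	return (error);
}
#endif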
/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

static int
vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{

	return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
}

int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{

	return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
	    lkflags, rvp));
}

int
vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
    int lkflags, struct vnode **rvp)
{
	struct mount *mp;
	int ltype, error;

	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get");
	mp = vp->v_mount;
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("vn_vget_ino: vp not locked"));
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = alloc(mp, alloc_arg, lkflags, rvp);
	vfs_unbusy(mp);
	if (*rvp != vp)
		vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0) {
			if (*rvp == vp)
				vunref(vp);
			else
				vput(*rvp);
		}
		error = ENOENT;
	}
	return (error);
}

int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	PROC_LOCK(td->td_proc);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
		kern_psignal(td->td_proc, SIGXFSZ);

/*
 * Enforce RLIMIT_FSIZE for a write to a regular file: if the request
 * would extend the file past the limit, post SIGXFSZ to the process
 * and return EFBIG.
 */
int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	PROC_LOCK(td->td_proc);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	PROC_UNLOCK(td->td_proc);
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

/*
 * Remove the cached pages in the given range from the VM object
 * backing the vnode, if any.
 */
void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

/*
 * Generic implementation of the FIOSEEKHOLE and FIOSEEKDATA ioctls
 * for file systems without native hole bookkeeping, probing the file
 * one logical block at a time with VOP_BMAP().
 */
int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size.  There is an implicit hole at the end of the file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}
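
/*
 * Illustrative sketch (not part of the original file): a file system
 * with no hole metadata of its own can implement the FIOSEEKDATA and
 * FIOSEEKHOLE ioctls in its VOP_IOCTL() simply by delegating to
 * vn_bmap_seekhole() above.  The function name is hypothetical.
 */
static int
example_fs_ioctl(struct vop_ioctl_args *ap)
{

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		return (vn_bmap_seekhole(ap->a_vp, ap->a_command,
		    (off_t *)ap->a_data, ap->a_cred));
	default:
		return (ENOTTY);
	}
}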

/*
 * Implementation of the lseek(2) file operation for vnode-backed
 * files.
 */
int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	*(off_t *)(td->td_retval) = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}
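
/*
 * Illustrative sketch (not part of the original file): how a file
 * system's VOP_SETATTR() implementation might use vn_utimes_perm()
 * before updating timestamps.  The function name and surrounding
 * logic are hypothetical and heavily simplified.
 */
static int
example_setattr_times(struct vnode *vp, struct vattr *vap,
    struct ucred *cred, struct thread *td)
{
	int error;

	if (vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL) {
		/* Owner, privilege, or suitable write access is required. */
		error = vn_utimes_perm(vp, vap, cred, td);
		if (error != 0)
			return (error);
		/* ... a real file system would update the inode here ... */
	}
	return (0);
}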