vfs_vnops.c revision 301100
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/vfs_vnops.c 301100 2016-06-01 04:07:33Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;

struct 	fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static int vn_io_fault_prefault = 0;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW,
    &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}
/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
	    O_EXCL | O_DIRECTORY))
		return (EINVAL);
	else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		/*
		 * Set NOCACHE to avoid flushing the cache when
		 * rolling in many files at once.
		 */
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
				ndp->ni_cnd.cn_flags |= MAKEENTRY;
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	struct mount *mp;
	accmode_t accmode;
	struct flock lf;
	int error, have_flock, lock_flags, type;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				return (error);
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				return (error);
		}
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		return (error);

	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		KASSERT(fp != NULL, ("open with flock requires fp"));
		lock_flags = VOP_ISLOCKED(vp);
		VOP_UNLOCK(vp, 0);
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
		have_flock = (error == 0);
		vn_lock(vp, lock_flags | LK_RETRY);
		if (error == 0 && vp->v_iflag & VI_DOOMED)
			error = ENOENT;
		/*
		 * Another thread might have used this vnode as an
		 * executable while the vnode lock was dropped.
		 * Ensure the vnode is still able to be opened for
		 * writing after the lock has been obtained.
		 */
		if (error == 0 && accmode & VWRITE)
			error = vn_writechk(vp);
		if (error) {
			VOP_UNLOCK(vp, 0);
			if (have_flock) {
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf,
				    F_FLOCK);
			}
			vn_start_write(vp, &mp, V_WAIT);
			vn_lock(vp, lock_flags | LK_RETRY);
			(void)VOP_CLOSE(vp, fmode, cred, td);
			vn_finished_write(mp);
			/* Prevent second close from fdrop()->vn_close. */
			if (fp != NULL)
				fp->f_ops = &badfileops;
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
	if (fmode & FWRITE) {
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	}
	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (0);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if (flags & FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
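/*
 * Worked example (added for illustration, not part of the original file):
 * a freshly opened file has f_seqcount == 1.  A sequential 64KB read then
 * adds howmany(65536, 16384) == 4, giving f_seqcount == 5, so the heuristic
 * returns 5 << IO_SEQSHIFT as the read-ahead hint, saturating at IO_SEQMAX.
 */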
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}
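/*
 * Illustrative sketch (added, not part of the original file): a typical
 * in-kernel caller packages a one-shot read through vn_rdwr().  The
 * helper name and buffer are hypothetical.
 */
#if 0
static int
example_read_header(struct vnode *vp, struct thread *td)
{
	char buf[512];
	ssize_t resid;
	int error;

	/* Read 512 bytes at offset 0; vn_rdwr does the locking itself. */
	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0, UIO_SYSSPACE,
	    0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid != 0)
		error = EIO;	/* short read */
	return (error);
}
#endif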
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * reads of long and shorter integer types are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}
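/*
 * Illustrative sketch (added, not part of the original file): the
 * lock/unlock pair brackets a file offset update so that concurrent
 * i/o on the same struct file sees a consistent f_offset.  The helper
 * is hypothetical.
 */
#if 0
static void
example_advance_offset(struct file *fp, off_t adv)
{
	off_t off;

	off = foffset_lock(fp, 0);	/* blocks other implicit-offset io */
	off += adv;
	foffset_unlock(fp, off, 0);	/* stores value and wakes waiters */
}
#endif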
static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (fp->f_advice != NULL &&
	    uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mtx *mtxp;
	int error, ioflag;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE read(2).  To optimize the common
		 * case of using POSIX_FADV_NOREUSE with sequential
		 * access, track the previous implicit DONTNEED
		 * request and grow this request to include the
		 * current read(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously read regions of the
		 * file.  This allows filesystem blocks that are
		 * accessed by multiple calls to read(2) to be flushed
		 * once the last read(2) finishes.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}
	return (error);
}
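/*
 * Illustrative sketch (added; userspace view, not part of the original
 * file): the DONTNEED coalescing above is what backs a streaming
 * consumer such as the following, which reads a file once and lets the
 * kernel recycle the page cache behind it.  consume() is hypothetical.
 */
#if 0
	int fd = open(path, O_RDONLY);
	posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		consume(buf, n);
	close(fd);
#endif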
/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	struct mtx *mtxp;
	int error, ioflag, lock_flags;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE write(2).  To optimize the
		 * common case of using POSIX_FADV_NOREUSE with
		 * sequential access, track the previous implicit
		 * DONTNEED request and grow this request to include
		 * the current write(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously written regions of the
		 * file.
		 *
		 * Note that the blocks just written are almost
		 * certainly still dirty, so this only works when
		 * VOP_ADVISE() calls from subsequent writes push out
		 * the data written by this write(2) once the backing
		 * buffers are clean.  However, as compared to forcing
		 * IO_DIRECT, this gives much saner behavior.  Write
		 * clustering is still allowed, and clean pages are
		 * merely moved to the cache page queue rather than
		 * outright thrown away.  This means a subsequent
		 * read(2) can still avoid hitting the disk if the
		 * pages have not been reclaimed.
		 *
		 * This does make POSIX_FADV_NOREUSE largely useless
		 * with non-sequential access.  However, sequential
		 * access is the more common use case and the flag is
		 * merely advisory.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}

unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then the system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */
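/*
 * Illustrative sketch (added, not part of the original file): a
 * filesystem honoring MNTK_NO_IOPF replaces uiomove() with
 * vn_io_fault_uiomove() in its VOP_READ loop.  The buffer and block
 * variables below are hypothetical, modeled on an ffs-style read loop.
 */
#if 0
		error = bread(vp, lbn, size, NOCRED, &bp);
		if (error == 0)
			error = vn_io_fault_uiomove((char *)bp->b_data +
			    blkoffset, (int)xfersize, uio);
		brelse(bp);
#endif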
/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

static int
vn_io_fault_touch(char *base, const struct uio *uio)
{
	int r;

	r = fubyte(base);
	if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
		return (EFAULT);
	return (0);
}

static int
vn_io_fault_prefault_user(const struct uio *uio)
{
	char *base;
	const struct iovec *iov;
	size_t len;
	ssize_t resid;
	int error, i;

	KASSERT(uio->uio_segflg == UIO_USERSPACE,
	    ("vn_io_fault_prefault userspace"));

	error = i = 0;
	iov = uio->uio_iov;
	resid = uio->uio_resid;
	base = iov->iov_base;
	len = iov->iov_len;
	while (resid > 0) {
		error = vn_io_fault_touch(base, uio);
		if (error != 0)
			break;
		if (len < PAGE_SIZE) {
			if (len != 0) {
				error = vn_io_fault_touch(base + len - 1, uio);
				if (error != 0)
					break;
				resid -= len;
			}
			if (++i >= uio->uio_iovcnt)
				break;
			iov = uio->uio_iov + i;
			base = iov->iov_base;
			len = iov->iov_len;
		} else {
			len -= PAGE_SIZE;
			base += PAGE_SIZE;
			resid -= PAGE_SIZE;
		}
	}
	return (error);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, save, saveheld, prev_td_ma_cnt;

	if (vn_io_fault_prefault) {
		error = vn_io_fault_prefault_user(uio);
		if (error != 0)
			return (error); /* Or ignore ? */
	}

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * UFS follows the IO_UNIT directive and rolls back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	save = vm_fault_disable_pagefaults();
	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use a partial
		 * page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	free(uio_clone, M_IOV);
	return (error);
}

static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for uio->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that specify MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}
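/*
 * Illustrative sketch (added, not part of the original file): a
 * filesystem opts into the vn_io_fault scheme at mount time, after
 * which vn_io_fault_uiomove() above and vn_io_fault_pgmove() below are
 * used instead of plain uiomove().
 */
#if 0
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_NO_IOPF;
	MNT_IUNLOCK(mp);
#endif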
int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}


/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		if ((fp->f_flag & O_FSYNC) != 0)
			vattr.va_vaflags |= VA_SYNC;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif

		error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
		vref(vp);

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error, mflags;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}

/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

 retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}


/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}
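/*
 * Illustrative sketch (added, not part of the original file): the
 * canonical bracket around a vnode-modifying operation.
 * vn_start_write() blocks while a suspension is in progress;
 * vn_finished_write() lets a pending suspension complete.  The vattr
 * setup is hypothetical.
 */
#if 0
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_SETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
#endif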
/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own a busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if a
	 * vfs_busy() reference is owned and the caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0) {
			vn_start_write(NULL, &mp, V_WAIT);
			return (error);
		}
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating them to vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}
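/*
 * Illustrative sketch (added, not part of the original file): fetching
 * a named attribute with the wrapper above.  The namespace and
 * attribute name are examples only.
 */
#if 0
	char buf[128];
	int buflen = sizeof(buf);

	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
	    "posix1e.acl_access", &buflen, buf, td);
	/* On success, buflen holds the number of bytes actually read. */
#endif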
not locked")); 2098 error = vfs_busy(mp, MBF_NOWAIT); 2099 if (error != 0) { 2100 vfs_ref(mp); 2101 VOP_UNLOCK(vp, 0); 2102 error = vfs_busy(mp, 0); 2103 vn_lock(vp, ltype | LK_RETRY); 2104 vfs_rel(mp); 2105 if (error != 0) 2106 return (ENOENT); 2107 if (vp->v_iflag & VI_DOOMED) { 2108 vfs_unbusy(mp); 2109 return (ENOENT); 2110 } 2111 } 2112 VOP_UNLOCK(vp, 0); 2113 error = alloc(mp, alloc_arg, lkflags, rvp); 2114 vfs_unbusy(mp); 2115 if (*rvp != vp) 2116 vn_lock(vp, ltype | LK_RETRY); 2117 if (vp->v_iflag & VI_DOOMED) { 2118 if (error == 0) { 2119 if (*rvp == vp) 2120 vunref(vp); 2121 else 2122 vput(*rvp); 2123 } 2124 error = ENOENT; 2125 } 2126 return (error); 2127} 2128 2129int 2130vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio, 2131 const struct thread *td) 2132{ 2133 2134 if (vp->v_type != VREG || td == NULL) 2135 return (0); 2136 PROC_LOCK(td->td_proc); 2137 if ((uoff_t)uio->uio_offset + uio->uio_resid > 2138 lim_cur(td->td_proc, RLIMIT_FSIZE)) { 2139 kern_psignal(td->td_proc, SIGXFSZ); 2140 PROC_UNLOCK(td->td_proc); 2141 return (EFBIG); 2142 } 2143 PROC_UNLOCK(td->td_proc); 2144 return (0); 2145} 2146 2147int 2148vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 2149 struct thread *td) 2150{ 2151 struct vnode *vp; 2152 2153 vp = fp->f_vnode; 2154#ifdef AUDIT 2155 vn_lock(vp, LK_SHARED | LK_RETRY); 2156 AUDIT_ARG_VNODE1(vp); 2157 VOP_UNLOCK(vp, 0); 2158#endif 2159 return (setfmode(td, active_cred, vp, mode)); 2160} 2161 2162int 2163vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 2164 struct thread *td) 2165{ 2166 struct vnode *vp; 2167 2168 vp = fp->f_vnode; 2169#ifdef AUDIT 2170 vn_lock(vp, LK_SHARED | LK_RETRY); 2171 AUDIT_ARG_VNODE1(vp); 2172 VOP_UNLOCK(vp, 0); 2173#endif 2174 return (setfown(td, active_cred, vp, uid, gid)); 2175} 2176 2177void 2178vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end) 2179{ 2180 vm_object_t object; 2181 2182 if ((object = vp->v_object) == NULL) 2183 return; 2184 VM_OBJECT_WLOCK(object); 2185 vm_object_page_remove(object, start, end, 0); 2186 VM_OBJECT_WUNLOCK(object); 2187} 2188 2189int 2190vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred) 2191{ 2192 struct vattr va; 2193 daddr_t bn, bnp; 2194 uint64_t bsize; 2195 off_t noff; 2196 int error; 2197 2198 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, 2199 ("Wrong command %lu", cmd)); 2200 2201 if (vn_lock(vp, LK_SHARED) != 0) 2202 return (EBADF); 2203 if (vp->v_type != VREG) { 2204 error = ENOTTY; 2205 goto unlock; 2206 } 2207 error = VOP_GETATTR(vp, &va, cred); 2208 if (error != 0) 2209 goto unlock; 2210 noff = *off; 2211 if (noff >= va.va_size) { 2212 error = ENXIO; 2213 goto unlock; 2214 } 2215 bsize = vp->v_mount->mnt_stat.f_iosize; 2216 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) { 2217 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL); 2218 if (error == EOPNOTSUPP) { 2219 error = ENOTTY; 2220 goto unlock; 2221 } 2222 if ((bnp == -1 && cmd == FIOSEEKHOLE) || 2223 (bnp != -1 && cmd == FIOSEEKDATA)) { 2224 noff = bn * bsize; 2225 if (noff < *off) 2226 noff = *off; 2227 goto unlock; 2228 } 2229 } 2230 if (noff > va.va_size) 2231 noff = va.va_size; 2232 /* noff == va.va_size. There is an implicit hole at the end of file. 

/*
 * Implement lseek(2) semantics for vnode-backed files, including
 * SEEK_DATA/SEEK_HOLE via the corresponding seek ioctls.
 */
int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	*(off_t *)(td->td_retval) = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}
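
/*
 * Usage sketch (illustrative only, kept under #if 0): a VOP_SETATTR
 * implementation can gate timestamp updates on vn_utimes_perm().
 * VA_UTIMES_NULL in va_vaflags marks a utimes(path, NULL)-style
 * "set to current time" request, for which plain write access is
 * accepted as well.  The surrounding setattr variables are assumed.
 */
#if 0
	if (vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL) {
		error = vn_utimes_perm(vp, vap, cred, td);
		if (error != 0)
			return (error);
		/* ...apply the new timestamps to the file... */
	}
#endif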