 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/sx.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

#include <machine/limits.h>

static int vn_closefile(struct file *fp, struct thread *td);
static int vn_ioctl(struct file *fp, u_long com, void *data,
	    struct thread *td);
static int vn_read(struct file *fp, struct uio *uio,
	    struct ucred *cred, int flags, struct thread *td);
static int vn_poll(struct file *fp, int events, struct ucred *cred,
	    struct thread *td);
static int vn_kqfilter(struct file *fp, struct knote *kn);
static int vn_statfile(struct file *fp, struct stat *sb, struct thread *td);
static int vn_write(struct file *fp, struct uio *uio,
	    struct ucred *cred, int flags, struct thread *td);

struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

int
vn_open(ndp, flagp, cmode)
	register struct nameidata *ndp;
	int *flagp, cmode;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, td->td_ucred));
}
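/*
 * Example usage (a sketch, not part of this file): opening a file by
 * path from kernel context.  The path and flags below are illustrative
 * and "td" is assumed to be the calling thread.  On success the vnode
 * comes back locked and referenced, so the caller unlocks it and later
 * drops the reference via vn_close():
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/etc/motd", td);
 *	flags = FREAD;
 *	if ((error = vn_open(&nd, &flags, 0)) == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		VOP_UNLOCK(nd.ni_vp, 0, td);
 *		... use nd.ni_vp ...
 *		(void) vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 *	}
 */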
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(ndp, flagp, cmode, cred)
	register struct nameidata *ndp;
	int *flagp, cmode;
	struct ucred *cred;
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, fmode, error;
#ifdef LOOKUP_SHARED
	int exclusive;		/* The current intended lock state */

	exclusive = 0;
#endif

restart:
	fmode = *flagp;
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
#ifdef LOOKUP_SHARED
			exclusive = 1;
#endif
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
#ifdef LOOKUP_SHARED
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) |
		    LOCKSHARED | LOCKLEAF;
#else
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
#endif
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
	if ((error = VOP_OPEN(vp, fmode, cred, td)) != 0)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
		int flock;

		if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
			VOP_LOCK(vp, LK_UPGRADE, td);
		/*
		 * In cases where the object is marked as dead object_create
		 * will unlock and relock exclusive.  It is safe to call in
		 * here with a shared lock because we only examine fields that
		 * the shared lock guarantees will be stable.  In the UPGRADE
		 * case it is not likely that anyone has used this vnode yet
		 * so there will be no contention.  The logic after this call
		 * restores the requested locking state.
		 */
#endif
		if ((error = vfs_object_create(vp, td, cred)) != 0) {
			VOP_UNLOCK(vp, 0, td);
			VOP_CLOSE(vp, fmode, cred, td);
			NDFREE(ndp, NDF_ONLY_PNBUF);
			vrele(vp);
			*flagp = fmode;
			return (error);
		}
#ifdef LOOKUP_SHARED
		flock = VOP_ISLOCKED(vp, td);
		if (!exclusive && flock == LK_EXCLUSIVE)
			VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, cred, td);
	/*
	 * XXX - In certain instances VOP_CLOSE has to do the vrele
	 * itself.  If the vrele has been done, it will return EAGAIN
	 * to indicate that the vrele should not be done again.  When
	 * this happens, we just return success.  The correct thing to
	 * do would be to have all VOP_CLOSE instances do the vrele.
	 */
	if (error == EAGAIN)
		return (0);
	vrele(vp);
	return (error);
}
/*
 * Sequential heuristic - detect sequential operation
 */
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		fp->f_seqcount += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (fp->f_seqcount >= 127)
			fp->f_seqcount = 127;
		/*
		 * The count is returned shifted into the upper bits of
		 * the word so that it can simply be or'd into ioflag,
		 * where filesystems read the sequential-access hint.
		 */
		return (fp->f_seqcount << 16);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				return (error);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		} else {
			vn_lock(vp, LK_SHARED | LK_RETRY, td);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_WRITE)
			vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, cred, aresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
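/*
 * Example usage (a sketch): reading the first bytes of an already
 * referenced vnode "vp" into a kernel buffer.  Passing 0 for ioflg
 * lets vn_rdwr() do the locking itself; on return "resid" holds the
 * number of bytes NOT transferred.  For large transfers while the
 * vnode is locked, vn_rdwr_inchunks() is the polite variant.
 *
 *	char buf[512];
 *	int resid, error;
 *
 *	error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf),
 *	    (off_t)0, UIO_SYSSPACE, 0, td->td_ucred, &resid, td);
 */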
/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	mtx_unlock(&Giant);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	struct thread *td;
	int flags;
{
	struct vnode *vp;
	struct mount *mp;
	int error, ioflag;

	mtx_lock(&Giant);
	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0) {
		mtx_unlock(&Giant);
		return (error);
	}
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	vn_finished_write(mp);
	mtx_unlock(&Giant);
	return (error);
}
/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, td)
	struct file *fp;
	struct stat *sb;
	struct thread *td;
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vn_stat(vp, sb, td);
	VOP_UNLOCK(vp, 0, td);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, td)
	struct vnode *vp;
	register struct stat *sb;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}
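/*
 * A worked example of the st_mode composition above (illustrative
 * values): a regular file with permission bits 0644 reports
 * st_mode = S_IFREG | 0644 = 0100644, while a directory with 0755
 * reports st_mode = S_IFDIR | 0755 = 0040755.
 */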
/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, td)
	struct file *fp;
	u_long com;
	void *data;
	struct thread *td;
{
	register struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *vpold;
	struct vattr vattr;
	int error;

	switch (vp->v_type) {

	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
			VOP_UNLOCK(vp, 0, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */

	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = devsw(vp->v_rdev)->d_flags & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, td->td_ucred, td);
		if (error == 0 && com == TIOCSCTTY) {
			/* Do nothing if reassigning same control tty */
			sx_slock(&proctree_lock);
			if (td->td_proc->p_session->s_ttyvp == vp) {
				sx_sunlock(&proctree_lock);
				return (0);
			}

			vpold = td->td_proc->p_session->s_ttyvp;
			VREF(vp);
			SESS_LOCK(td->td_proc->p_session);
			td->td_proc->p_session->s_ttyvp = vp;
			SESS_UNLOCK(td->td_proc->p_session);

			sx_sunlock(&proctree_lock);

			/* Get rid of reference to old control tty */
			if (vpold)
				vrele(vpold);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, cred, td)
	struct file *fp;
	int events;
	struct ucred *cred;
	struct thread *td;
{

	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}
/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(vp, flags, td)
#else
debug_vn_lock(vp, flags, td, filename, line)
#endif
	struct vnode *vp;
	int flags;
	struct thread *td;
#ifdef	DEBUG_LOCKS
	const char *filename;
	int line;
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			mtx_lock(&vp->v_interlock);
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curthread) {
			vp->v_flag |= VXWANT;
			msleep(vp, &vp->v_interlock, PINOD | PDROP,
			    "vn_lock", 0);
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef	DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
			    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{

	fp->f_ops = &badfileops;
	return (vn_close(((struct vnode *)fp->f_data), fp->f_flag,
	    fp->f_cred, td));
}

/*
 * Prepare to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
int
vn_start_write(vp, mpp, flags)
	struct vnode *vp;
	struct mount **mpp;
	int flags;
{
	struct mount *mp;
	int error;

	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);
	/*
	 * Check on status of suspension.
	 */
	while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		if (flags & V_NOWAIT)
			return (EWOULDBLOCK);
		error = tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
		    "suspfs", 0);
		if (error)
			return (error);
	}
	if (flags & V_XSLEEP)
		return (0);
	mp->mnt_writeopcount++;
	return (0);
}
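/*
 * Example usage (a sketch): the write-bracketing pattern used by the
 * routines in this file.  A modifying operation on "vp" is wrapped in
 * vn_start_write() / vn_finished_write() so that a suspension request
 * can drain it; the VOP call shown is illustrative:
 *
 *	struct mount *mp;
 *
 *	if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	error = VOP_SETATTR(vp, &vattr, cred, td);
 *	VOP_UNLOCK(vp, 0, td);
 *	vn_finished_write(mp);
 */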
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_write_suspend_wait(vp, mp, flags)
	struct vnode *vp;
	struct mount *mp;
	int flags;
{
	int error;

	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, &mp)) != 0) {
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0)
		return (0);
	if (flags & V_NOWAIT)
		return (EWOULDBLOCK);
	/*
	 * Wait for the suspension to finish.
	 */
	return (tsleep(&mp->mnt_flag, (PUSER - 1) | (flags & PCATCH),
	    "suspfs", 0));
}

/*
 * Filesystem write operation has completed.  If we are suspending and
 * this operation is the last one, notify the suspender that the
 * suspension is now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{

	if (mp == NULL)
		return;
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
}

/*
 * Request a filesystem to suspend write operations.
 */
void
vfs_write_suspend(mp)
	struct mount *mp;
{
	struct thread *td = curthread;

	if (mp->mnt_kern_flag & MNTK_SUSPEND)
		return;
	mp->mnt_kern_flag |= MNTK_SUSPEND;
	if (mp->mnt_writeopcount > 0)
		(void) tsleep(&mp->mnt_writeopcount, PUSER - 1, "suspwt", 0);
	VFS_SYNC(mp, MNT_WAIT, td->td_ucred, td);
	mp->mnt_kern_flag |= MNTK_SUSPENDED;
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(mp)
	struct mount *mp;
{

	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0)
		return;
	mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPENDED);
	wakeup(&mp->mnt_writeopcount);
	wakeup(&mp->mnt_flag);
}

/*
 * Implement kqueues for files by translating them into vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}
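/*
 * Example (a sketch): how a snapshot-style consumer might quiesce a
 * filesystem around a critical section, assuming it already holds a
 * reference on "mp":
 *
 *	vfs_write_suspend(mp);
 *	... the filesystem is now idle with respect to write operations ...
 *	vfs_write_resume(mp);
 */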
/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	}

	/* authorize attribute removal as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0, td);
	}

	return (error);
}
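/*
 * Example usage (a sketch): fetching a system-namespace attribute into
 * a stack buffer.  The attribute name is illustrative;
 * EXTATTR_NAMESPACE_SYSTEM comes from <sys/extattr.h>.  Passing 0 for
 * ioflg lets vn_extattr_get() lock the vnode itself; on success
 * "buflen" is updated to the number of bytes actually returned:
 *
 *	char buf[128];
 *	int buflen = sizeof(buf);
 *
 *	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
 *	    "posix1e.acl_access", &buflen, buf, td);
 */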