/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
		    char *dirbuf, int dirbuflen, off_t *off,
		    char **cpos, int *len, int *eofflag,
		    struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
		    struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
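 *
 * Illustrative sketch (the foofs_* names below are hypothetical, not part
 * of this file): a filesystem normally supplies its own vop_vector for the
 * operations it implements and chains to this table for everything else:
 *
 *	struct vop_vector foofs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	foofs_lookup,
 *		.vop_read =	foofs_read,
 *	};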
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_add_writecount =	vop_stdadd_writecount,
};

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement, where it exists, is
 * found in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.
 *	Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 * Returns 1 if the entry was found and 0 otherwise; errors from the
 * underlying VOPs are reported as "not found".
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
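 *
 * Illustrative sketch (foofs_pathconf is a hypothetical example, not part
 * of this file): a filesystem typically handles only the variables it
 * constrains and falls back to this routine for everything else:
 *
 *	static int
 *	foofs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = 255;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}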
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_ACL_EXTENDED:
	case _PC_ACL_NFS4:
	case _PC_CAP_PRESENT:
	case _PC_INF_PRESENT:
	case _PC_MAC_PRESENT:
		*ap->a_retval = 0;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    &ilk->lock_object));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
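 *
 * For example, on a hypothetical mount with an f_iosize of 16384 bytes
 * (and the usual 512-byte DEV_BSIZE), logical block 10 would be reported
 * as physical block 10 * btodb(16384) = 320, with zero-length runs.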
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, MNT_WAIT));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	if (ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

 out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.
		 * Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		bsize = vp->v_bufobj.bo_bsize;
		startn = ap->a_start / bsize;
		endn = ap->a_end / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return (ap->a_vp->v_writecount < 0);
}

int
vop_stdset_text(struct vop_set_text_args *ap)
{
	struct vnode *vp;
	struct mount *mp;
	int error;

	vp = ap->a_vp;
	VI_LOCK(vp);
	if (vp->v_writecount > 0) {
		error = ETXTBSY;
	} else {
		/*
		 * If requested by fs, keep a use reference to the
		 * vnode until the last text reference is released.
		 */
		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_TEXT_REFS) != 0 &&
		    vp->v_writecount == 0) {
			vp->v_iflag |= VI_TEXT_REF;
			vrefl(vp);
		}

		vp->v_writecount--;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{
	struct vnode *vp;
	int error;
	bool last;

	vp = ap->a_vp;
	last = false;
	VI_LOCK(vp);
	if (vp->v_writecount < 0) {
		if ((vp->v_iflag & VI_TEXT_REF) != 0 &&
		    vp->v_writecount == -1) {
			last = true;
			vp->v_iflag &= ~VI_TEXT_REF;
		}
		vp->v_writecount++;
		error = 0;
	} else {
		error = EINVAL;
	}
	VI_UNLOCK(vp);
	if (last)
		vunref(vp);
	return (error);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *vp;
	int error;

	vp = ap->a_vp;
	VI_LOCK_FLAGS(vp, MTX_DUPOK);
	if (vp->v_writecount < 0) {
		error = ETXTBSY;
	} else {
		VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
		    ("neg writecount increment %d", ap->a_inc));
		vp->v_writecount += ap->a_inc;
		error = 0;
	}
	VI_UNLOCK(vp);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
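 *
 * Illustrative sketch (the foofs_* names are hypothetical, not part of
 * this file): a filesystem can point the vfs operations it does not
 * support at these stubs:
 *
 *	static struct vfsops foofs_vfsops = {
 *		.vfs_mount =	foofs_mount,
 *		.vfs_unmount =	foofs_unmount,
 *		.vfs_root =	foofs_root,
 *		.vfs_statfs =	foofs_statfs,
 *		.vfs_quotactl =	vfs_stdquotactl,
 *		.vfs_sync =	vfs_stdnosync,
 *	};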
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */