vfs_default.c revision 198873
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 198873 2009-11-04 06:47:14Z trasz $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess() etc.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
};
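
/*
 * For illustration only (hypothetical filesystem, made-up names): a
 * filesystem normally provides its own vop_vector and names a fallback
 * table via vop_default, so any VOP it does not implement is resolved by
 * the entries in default_vnodeops above.  A sketch of such a table:
 *
 *	static struct vop_vector foofs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	foofs_lookup,
 *		.vop_getattr =	foofs_getattr,
 *		.vop_read =	foofs_read,
 *		.vop_reclaim =	foofs_reclaim,
 *	};
 *
 * Anything missing from foofs_vnodeops (close, poll, lock1, ...) then
 * falls through to the defaults above.
 */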

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}
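
/*
 * Helper used by dirent_exists() and vop_stdvptocnp() below: it returns the
 * next directory entry from a caller-supplied buffer, refilling the buffer
 * with VOP_READDIR() whenever *len drops to zero.  Callers start with
 * *off = 0 and *len = 0 and keep iterating while (len > 0 || !eofflag).
 */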

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    !strcmp(dp->d_name, dirname)) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	/*
	 * Many VOP_APPEND implementations don't expect VAPPEND without VWRITE
	 * being set, e.g. they check whether the filesystem is read-only only
	 * when VWRITE is set.  Make sure we don't confuse them.
	 */
	if (accmode & VAPPEND)
		accmode |= VWRITE;

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
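
/*
 * Illustration (hypothetical call, not part of this file): a caller doing
 * VOP_ACCESSX(vp, VAPPEND, cred, td) against a filesystem that implements
 * only vop_access ends up here; assuming vfs_unixify_accmode() passes
 * VAPPEND through unchanged, the check above turns the request into
 * VOP_ACCESS(vp, VWRITE | VAPPEND, cred, td), so read-only checks keyed on
 * VWRITE still fire.
 */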

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct ucred *cred;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	cred = curthread->td_ucred;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	if (error)
		return (error);

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
		case _PC_NAME_MAX:
			*ap->a_retval = NAME_MAX;
			return (0);
		case _PC_PATH_MAX:
			*ap->a_retval = PATH_MAX;
			return (0);
		case _PC_LINK_MAX:
			*ap->a_retval = LINK_MAX;
			return (0);
		case _PC_MAX_CANON:
			*ap->a_retval = MAX_CANON;
			return (0);
		case _PC_MAX_INPUT:
			*ap->a_retval = MAX_INPUT;
			return (0);
		case _PC_PIPE_BUF:
			*ap->a_retval = PIPE_BUF;
			return (0);
		case _PC_CHOWN_RESTRICTED:
			*ap->a_retval = 1;
			return (0);
		case _PC_VDISABLE:
			*ap->a_retval = _POSIX_VDISABLE;
			return (0);
		default:
			return (EINVAL);
	}
	/* NOTREACHED */
}
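
/*
 * For illustration only (hypothetical filesystem, made-up names): a
 * filesystem with its own limits typically handles the names it cares
 * about and punts everything else back to vop_stdpathconf():
 *
 *	static int
 *	foofs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		switch (ap->a_name) {
 *		case _PC_NAME_MAX:
 *			*ap->a_retval = FOOFS_MAXNAMLEN;
 *			return (0);
 *		case _PC_NO_TRUNC:
 *			*ap->a_retval = 1;
 *			return (0);
 *		default:
 *			return (vop_stdpathconf(ap));
 *		}
 *	}
 */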

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
	    ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
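
/*
 * A note on the default bmap above: it assumes a strictly linear mapping.
 * It hands back the vnode's own buffer object, re-expresses the logical
 * block number a_bn in DEV_BSIZE units via btodb(f_iosize), and reports no
 * read-ahead or read-behind runs.  Worked example (numbers are only an
 * illustration): with mnt_stat.f_iosize = 16384 and DEV_BSIZE = 512,
 * btodb(16384) == 32, so logical block 3 is reported as block 3 * 32 == 96.
 */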

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}
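
/*
 * Default vptocnp: translate a directory vnode back into a name in its
 * parent.  The function below works by opening ".." relative to vp, scanning
 * the parent with VOP_READDIR() for an entry whose d_fileno matches vp's
 * va_fileid, and copying that name into the tail end of the caller's buffer
 * (the buffer is filled from the back so callers can prepend further path
 * components).  If the parent turns out to be the root of a MNT_UNION mount,
 * the search drops down to the covered vnode, and a candidate name that is
 * also present in the mount root itself is rejected with ENOENT.
 */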

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_EXCLUSIVE | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_EXCLUSIVE | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			bcopy(dp->d_name, buf + i, dp->d_namlen);
			error = 0;
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vhold(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		/* bv_cnt is an acceptable race here. */
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0)
			continue;
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return(0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
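
/*
 * For illustration only (hypothetical filesystem, made-up names): the
 * vfs_std* functions above are meant to plug the unimplemented slots of a
 * filesystem's vfsops.  A minimal read-only filesystem might provide only
 * mount/unmount/root/statfs and lean on the defaults for the rest, e.g.:
 *
 *	static struct vfsops foofs_vfsops = {
 *		.vfs_mount =	foofs_mount,
 *		.vfs_unmount =	foofs_unmount,
 *		.vfs_root =	foofs_root,
 *		.vfs_statfs =	foofs_statfs,
 *		.vfs_sync =	vfs_stdnosync,
 *		.vfs_vget =	vfs_stdvget,
 *	};
 *	VFS_SET(foofs_vfsops, foofs, VFCF_READONLY);
 *
 * In this era of the tree, slots left NULL are also filled in with the
 * corresponding vfs_std* entries when the filesystem registers itself
 * (see vfs_register() in vfs_init.c).
 */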