vfs_default.c revision 162455
1139804Simp/*- 230489Sphk * Copyright (c) 1989, 1993 330489Sphk * The Regents of the University of California. All rights reserved. 430489Sphk * 530489Sphk * This code is derived from software contributed 630489Sphk * to Berkeley by John Heidemann of the UCLA Ficus project. 730489Sphk * 830489Sphk * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project 930489Sphk * 1030489Sphk * Redistribution and use in source and binary forms, with or without 1130489Sphk * modification, are permitted provided that the following conditions 1230489Sphk * are met: 1330489Sphk * 1. Redistributions of source code must retain the above copyright 1430489Sphk * notice, this list of conditions and the following disclaimer. 1530489Sphk * 2. Redistributions in binary form must reproduce the above copyright 1630489Sphk * notice, this list of conditions and the following disclaimer in the 1730489Sphk * documentation and/or other materials provided with the distribution. 1830489Sphk * 4. Neither the name of the University nor the names of its contributors 1930489Sphk * may be used to endorse or promote products derived from this software 2030489Sphk * without specific prior written permission. 2130489Sphk * 2230489Sphk * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 2330489Sphk * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2430489Sphk * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2530489Sphk * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2630489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2730489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2830489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2930489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3030489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3130489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3230489Sphk * SUCH DAMAGE. 3330489Sphk */ 3430489Sphk 35116182Sobrien#include <sys/cdefs.h> 36116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 162455 2006-09-20 00:27:02Z tegge $"); 37116182Sobrien 3830489Sphk#include <sys/param.h> 3930489Sphk#include <sys/systm.h> 4060041Sphk#include <sys/bio.h> 4144272Sbde#include <sys/buf.h> 4265770Sbp#include <sys/conf.h> 43147198Sssouhlal#include <sys/event.h> 4430489Sphk#include <sys/kernel.h> 45114216Skan#include <sys/limits.h> 4631561Sbde#include <sys/lock.h> 4730743Sphk#include <sys/malloc.h> 4851068Salfred#include <sys/mount.h> 4967365Sjhb#include <sys/mutex.h> 5030492Sphk#include <sys/unistd.h> 5130489Sphk#include <sys/vnode.h> 5230743Sphk#include <sys/poll.h> 5330489Sphk 5465770Sbp#include <vm/vm.h> 5565770Sbp#include <vm/vm_object.h> 5665770Sbp#include <vm/vm_extern.h> 5765770Sbp#include <vm/pmap.h> 5865770Sbp#include <vm/vm_map.h> 5965770Sbp#include <vm/vm_page.h> 6065770Sbp#include <vm/vm_pager.h> 6165770Sbp#include <vm/vnode_pager.h> 6265770Sbp 6392723Salfredstatic int vop_nolookup(struct vop_lookup_args *); 6492723Salfredstatic int vop_nostrategy(struct vop_strategy_args *); 6530489Sphk 6630489Sphk/* 6730489Sphk * This vnode table stores what we want to do if the filesystem doesn't 6830489Sphk * implement a particular VOP. 6930489Sphk * 7030489Sphk * If there is no specific entry here, we will return EOPNOTSUPP. 
 *
 */

/*
 * Any VOP with no entry below falls through to .vop_bypass and therefore
 * returns EOPNOTSUPP to the caller.
 */
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,	/* no advisory locking */
	.vop_bmap =		vop_stdbmap,	/* identity logical->physical map */
	.vop_close =		VOP_NULL,	/* close is a no-op, returns 0 */
	.vop_fsync =		VOP_NULL,	/* nothing to flush by default */
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,	/* not a tty / no ioctls */
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	.vop_lock =		vop_stdlock,	/* lockmgr on v_vnlock */
	.vop_lookup =		vop_nolookup,	/* ENOTDIR: no name lookup */
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,	/* see vop_stdpathconf for opt-in */
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,	/* not a symlink */
	.vop_revoke =		VOP_PANIC,	/* revoke on non-device is a bug */
	.vop_strategy =		vop_nostrategy,	/* fail I/O with EOPNOTSUPP */
	.vop_unlock =		vop_stdunlock,
};

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

/* Generic "operation not supported" return; the .vop_bypass default. */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

/* Generic "bad file descriptor" return. */
int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

/* Generic "inappropriate ioctl" return. */
int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

/* Generic "invalid argument" return. */
int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

/* Generic success no-op: do nothing and report success. */
int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	/* Never returns; vdesc_name identifies the offending VOP. */
	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
16191690Seivind */ 16291690Seivind 16391690Seivind/* 16491690Seivind * Default vop for filesystems that do not support name lookup 16591690Seivind */ 16672594Sbdestatic int 16772594Sbdevop_nolookup(ap) 16872594Sbde struct vop_lookup_args /* { 16972594Sbde struct vnode *a_dvp; 17072594Sbde struct vnode **a_vpp; 17172594Sbde struct componentname *a_cnp; 17272594Sbde } */ *ap; 17372594Sbde{ 17472594Sbde 17572594Sbde *ap->a_vpp = NULL; 17672594Sbde return (ENOTDIR); 17772594Sbde} 17872594Sbde 17946349Salc/* 18046349Salc * vop_nostrategy: 18146349Salc * 18246349Salc * Strategy routine for VFS devices that have none. 18346349Salc * 18458934Sphk * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy 18558345Sphk * routine. Typically this is done for a BIO_READ strategy call. 186112067Skan * Typically B_INVAL is assumed to already be clear prior to a write 18758345Sphk * and should not be cleared manually unless you just made the buffer 18858934Sphk * invalid. BIO_ERROR should be cleared either way. 18946349Salc */ 19046349Salc 19130489Sphkstatic int 19230489Sphkvop_nostrategy (struct vop_strategy_args *ap) 19330489Sphk{ 19430489Sphk printf("No strategy for buffer at %p\n", ap->a_bp); 195111842Snjl vprint("vnode", ap->a_vp); 19658934Sphk ap->a_bp->b_ioflags |= BIO_ERROR; 19730489Sphk ap->a_bp->b_error = EOPNOTSUPP; 19859249Sphk bufdone(ap->a_bp); 19930489Sphk return (EOPNOTSUPP); 20030489Sphk} 20130492Sphk 20291690Seivind/* 20391690Seivind * vop_stdpathconf: 204112067Skan * 20591690Seivind * Standard implementation of POSIX pathconf, to get information about limits 20691690Seivind * for a filesystem. 20791690Seivind * Override per filesystem for the case where the filesystem has smaller 20891690Seivind * limits. 
20991690Seivind */ 21030492Sphkint 21130492Sphkvop_stdpathconf(ap) 21230492Sphk struct vop_pathconf_args /* { 21330492Sphk struct vnode *a_vp; 21430492Sphk int a_name; 21530492Sphk int *a_retval; 21630492Sphk } */ *ap; 21730492Sphk{ 21830492Sphk 21930492Sphk switch (ap->a_name) { 220149175Sphk case _PC_NAME_MAX: 221149175Sphk *ap->a_retval = NAME_MAX; 222149175Sphk return (0); 223149175Sphk case _PC_PATH_MAX: 224149175Sphk *ap->a_retval = PATH_MAX; 225149175Sphk return (0); 22630492Sphk case _PC_LINK_MAX: 22730492Sphk *ap->a_retval = LINK_MAX; 22830492Sphk return (0); 22930492Sphk case _PC_MAX_CANON: 23030492Sphk *ap->a_retval = MAX_CANON; 23130492Sphk return (0); 23230492Sphk case _PC_MAX_INPUT: 23330492Sphk *ap->a_retval = MAX_INPUT; 23430492Sphk return (0); 23530492Sphk case _PC_PIPE_BUF: 23630492Sphk *ap->a_retval = PIPE_BUF; 23730492Sphk return (0); 23830492Sphk case _PC_CHOWN_RESTRICTED: 23930492Sphk *ap->a_retval = 1; 24030492Sphk return (0); 24130492Sphk case _PC_VDISABLE: 24230492Sphk *ap->a_retval = _POSIX_VDISABLE; 24330492Sphk return (0); 24430492Sphk default: 24530492Sphk return (EINVAL); 24630492Sphk } 24730492Sphk /* NOTREACHED */ 24830492Sphk} 24930513Sphk 25030513Sphk/* 25130513Sphk * Standard lock, unlock and islocked functions. 25230513Sphk */ 25330513Sphkint 25430513Sphkvop_stdlock(ap) 25530513Sphk struct vop_lock_args /* { 25630513Sphk struct vnode *a_vp; 25730513Sphk int a_flags; 25883366Sjulian struct thread *a_td; 25930513Sphk } */ *ap; 260112067Skan{ 26166355Sbp struct vnode *vp = ap->a_vp; 26230513Sphk 263105077Smckusick return (lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td)); 26430513Sphk} 26530513Sphk 26691690Seivind/* See above. 
*/ 26730513Sphkint 26830513Sphkvop_stdunlock(ap) 26930513Sphk struct vop_unlock_args /* { 27030513Sphk struct vnode *a_vp; 27130513Sphk int a_flags; 27283366Sjulian struct thread *a_td; 27330513Sphk } */ *ap; 27430513Sphk{ 27566355Sbp struct vnode *vp = ap->a_vp; 27630513Sphk 277105077Smckusick return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp), 27883366Sjulian ap->a_td)); 27930513Sphk} 28030513Sphk 28191690Seivind/* See above. */ 28230513Sphkint 28330513Sphkvop_stdislocked(ap) 28430513Sphk struct vop_islocked_args /* { 28530513Sphk struct vnode *a_vp; 28683366Sjulian struct thread *a_td; 28730513Sphk } */ *ap; 28830513Sphk{ 28930513Sphk 290105077Smckusick return (lockstatus(ap->a_vp->v_vnlock, ap->a_td)); 29130513Sphk} 29230513Sphk 29330743Sphk/* 29430743Sphk * Return true for select/poll. 29530743Sphk */ 29630743Sphkint 29730743Sphkvop_nopoll(ap) 29830743Sphk struct vop_poll_args /* { 29930743Sphk struct vnode *a_vp; 30030743Sphk int a_events; 30130743Sphk struct ucred *a_cred; 30283366Sjulian struct thread *a_td; 30330743Sphk } */ *ap; 30430743Sphk{ 30530743Sphk /* 30631727Swollman * Return true for read/write. If the user asked for something 30731727Swollman * special, return POLLNVAL, so that clients have a way of 30831727Swollman * determining reliably whether or not the extended 30931727Swollman * functionality is present without hard-coding knowledge 31031727Swollman * of specific filesystem implementations. 311120514Sphk * Stay in sync with kern_conf.c::no_poll(). 31230743Sphk */ 31331727Swollman if (ap->a_events & ~POLLSTANDARD) 31431727Swollman return (POLLNVAL); 31531727Swollman 31630743Sphk return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 31730743Sphk} 31830743Sphk 31931727Swollman/* 32031727Swollman * Implement poll for local filesystems that support it. 
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	/*
	 * Non-standard events are recorded against the vnode so the
	 * filesystem can wake the poller later; standard read/write
	 * readiness is reported immediately.
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		/* Re-check after taking the ref; drop it if we lost the race. */
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * Default VOP_BMAP: map the logical block 1:1 onto the underlying bufobj
 * (scaled by the filesystem's I/O size), with no read-ahead/read-behind
 * runs.  See VOP_BMAP(9).
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

/*
 * Default VOP_FSYNC: write out every dirty buffer attached to the vnode;
 * for MNT_WAIT also wait for in-progress writes and retry until the dirty
 * list drains or we give up (EAGAIN).
 */
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/* Skip buffers someone else holds locked; they'll be retried. */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		KASSERT(bp->b_bufobj == &vp->v_bufobj,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, &vp->v_bufobj));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		/* The dirty list may have changed while unlocked; rescan. */
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}

/*
 * Default VOP_GETPAGES: delegate to the generic vnode pager, which builds
 * the read via VOP_BMAP/VOP_STRATEGY.  See VOP_GETPAGES(9).
 */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

/* Default VOP_KQFILTER: hand the request to the generic vfs kq filter. */
int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/*
 * Default VOP_PUTPAGES: delegate to the generic vnode pager.
 * See VOP_PUTPAGES(9).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
51551068Salfred */ 51691690Seivindint 517144054Sjeffvfs_stdroot (mp, flags, vpp, td) 51851068Salfred struct mount *mp; 519144054Sjeff int flags; 52051068Salfred struct vnode **vpp; 521132023Salfred struct thread *td; 52251068Salfred{ 523131734Salfred 52451068Salfred return (EOPNOTSUPP); 52551068Salfred} 52651068Salfred 52791690Seivindint 52883366Sjulianvfs_stdstatfs (mp, sbp, td) 52951068Salfred struct mount *mp; 53051068Salfred struct statfs *sbp; 53183366Sjulian struct thread *td; 53251068Salfred{ 533131734Salfred 53451068Salfred return (EOPNOTSUPP); 53551068Salfred} 53651068Salfred 53751068Salfredint 53851068Salfredvfs_stdvptofh (vp, fhp) 53951068Salfred struct vnode *vp; 54051068Salfred struct fid *fhp; 54151068Salfred{ 542131734Salfred 54351068Salfred return (EOPNOTSUPP); 54451068Salfred} 54551068Salfred 54691690Seivindint 54783366Sjulianvfs_stdquotactl (mp, cmds, uid, arg, td) 54851068Salfred struct mount *mp; 54951068Salfred int cmds; 55051068Salfred uid_t uid; 551153400Sdes void *arg; 55283366Sjulian struct thread *td; 55351068Salfred{ 554131734Salfred 55551068Salfred return (EOPNOTSUPP); 55651068Salfred} 55751068Salfred 558112067Skanint 559140048Sphkvfs_stdsync(mp, waitfor, td) 56051068Salfred struct mount *mp; 56151068Salfred int waitfor; 56283366Sjulian struct thread *td; 56351068Salfred{ 564154152Stegge struct vnode *vp, *mvp; 565112119Skan int error, lockreq, allerror = 0; 566112119Skan 567112119Skan lockreq = LK_EXCLUSIVE | LK_INTERLOCK; 568112119Skan if (waitfor != MNT_WAIT) 569112119Skan lockreq |= LK_NOWAIT; 570112119Skan /* 571112119Skan * Force stale buffer cache information to be flushed. 
572112119Skan */ 573122091Skan MNT_ILOCK(mp); 574112119Skanloop: 575154152Stegge MNT_VNODE_FOREACH(vp, mp, mvp) { 576112119Skan 577112119Skan VI_LOCK(vp); 578136943Sphk if (vp->v_bufobj.bo_dirty.bv_cnt == 0) { 579112119Skan VI_UNLOCK(vp); 580112119Skan continue; 581112119Skan } 582122091Skan MNT_IUNLOCK(mp); 583112119Skan 584112119Skan if ((error = vget(vp, lockreq, td)) != 0) { 585122091Skan MNT_ILOCK(mp); 586154152Stegge if (error == ENOENT) { 587154152Stegge MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); 588112119Skan goto loop; 589154152Stegge } 590112119Skan continue; 591112119Skan } 592140048Sphk error = VOP_FSYNC(vp, waitfor, td); 593112119Skan if (error) 594112119Skan allerror = error; 595112119Skan 596155032Sjeff /* Do not turn this into vput. td is not always curthread. */ 597121874Skan VOP_UNLOCK(vp, 0, td); 598121874Skan vrele(vp); 599122091Skan MNT_ILOCK(mp); 600112119Skan } 601122091Skan MNT_IUNLOCK(mp); 602112119Skan return (allerror); 603112119Skan} 604112119Skan 605112119Skanint 606140048Sphkvfs_stdnosync (mp, waitfor, td) 607112119Skan struct mount *mp; 608112119Skan int waitfor; 609112119Skan struct thread *td; 610112119Skan{ 611131734Salfred 61251068Salfred return (0); 61351068Salfred} 61451068Salfred 615112067Skanint 61692462Smckusickvfs_stdvget (mp, ino, flags, vpp) 61751068Salfred struct mount *mp; 61851068Salfred ino_t ino; 61992462Smckusick int flags; 62051068Salfred struct vnode **vpp; 62151068Salfred{ 622131734Salfred 62351068Salfred return (EOPNOTSUPP); 62451068Salfred} 62551068Salfred 626112067Skanint 62751138Salfredvfs_stdfhtovp (mp, fhp, vpp) 62851068Salfred struct mount *mp; 62951068Salfred struct fid *fhp; 63051138Salfred struct vnode **vpp; 63151138Salfred{ 632131734Salfred 63351138Salfred return (EOPNOTSUPP); 63451138Salfred} 63551138Salfred 63651068Salfredint 637112067Skanvfs_stdinit (vfsp) 63851068Salfred struct vfsconf *vfsp; 63951068Salfred{ 640131734Salfred 64151068Salfred return (0); 64251068Salfred} 64351068Salfred 
64451068Salfredint 64551068Salfredvfs_stduninit (vfsp) 64651068Salfred struct vfsconf *vfsp; 64751068Salfred{ 648131734Salfred 64951068Salfred return(0); 65051068Salfred} 65151068Salfred 65254803Srwatsonint 65383366Sjulianvfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td) 65454803Srwatson struct mount *mp; 65554803Srwatson int cmd; 65674273Srwatson struct vnode *filename_vp; 65774437Srwatson int attrnamespace; 65856272Srwatson const char *attrname; 65983366Sjulian struct thread *td; 66054803Srwatson{ 661131734Salfred 662101786Sphk if (filename_vp != NULL) 663101786Sphk VOP_UNLOCK(filename_vp, 0, td); 664131734Salfred return (EOPNOTSUPP); 66554803Srwatson} 66654803Srwatson 667131733Salfredint 668131733Salfredvfs_stdsysctl(mp, op, req) 669131733Salfred struct mount *mp; 670131733Salfred fsctlop_t op; 671131733Salfred struct sysctl_req *req; 672131733Salfred{ 673131733Salfred 674131733Salfred return (EOPNOTSUPP); 675131733Salfred} 676131733Salfred 67751068Salfred/* end of vfs default ops */ 678