/* vfs_default.c — FreeBSD SVN revision 178243 */
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2630489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2730489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2830489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2930489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3030489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3130489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3230489Sphk * SUCH DAMAGE. 3330489Sphk */ 3430489Sphk 35116182Sobrien#include <sys/cdefs.h> 36116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/vfs_default.c 178243 2008-04-16 11:33:32Z kib $"); 37116182Sobrien 3830489Sphk#include <sys/param.h> 3930489Sphk#include <sys/systm.h> 4060041Sphk#include <sys/bio.h> 4144272Sbde#include <sys/buf.h> 4265770Sbp#include <sys/conf.h> 43147198Sssouhlal#include <sys/event.h> 4430489Sphk#include <sys/kernel.h> 45114216Skan#include <sys/limits.h> 4631561Sbde#include <sys/lock.h> 47178243Skib#include <sys/lockf.h> 4830743Sphk#include <sys/malloc.h> 4951068Salfred#include <sys/mount.h> 5067365Sjhb#include <sys/mutex.h> 5130492Sphk#include <sys/unistd.h> 5230489Sphk#include <sys/vnode.h> 5330743Sphk#include <sys/poll.h> 5430489Sphk 5565770Sbp#include <vm/vm.h> 5665770Sbp#include <vm/vm_object.h> 5765770Sbp#include <vm/vm_extern.h> 5865770Sbp#include <vm/pmap.h> 5965770Sbp#include <vm/vm_map.h> 6065770Sbp#include <vm/vm_page.h> 6165770Sbp#include <vm/vm_pager.h> 6265770Sbp#include <vm/vnode_pager.h> 6365770Sbp 6492723Salfredstatic int vop_nolookup(struct vop_lookup_args *); 6592723Salfredstatic int vop_nostrategy(struct vop_strategy_args *); 6630489Sphk 6730489Sphk/* 6830489Sphk * This vnode table stores what we want to do if the filesystem doesn't 6930489Sphk * implement a particular VOP. 
7030489Sphk * 7130489Sphk * If there is no specific entry here, we will return EOPNOTSUPP. 7230489Sphk * 7330489Sphk */ 7430489Sphk 75138290Sphkstruct vop_vector default_vnodeops = { 76138290Sphk .vop_default = NULL, 77138339Sphk .vop_bypass = VOP_EOPNOTSUPP, 78138339Sphk 79178243Skib .vop_advlock = vop_stdadvlock, 80178243Skib .vop_advlockasync = vop_stdadvlockasync, 81138290Sphk .vop_bmap = vop_stdbmap, 82138290Sphk .vop_close = VOP_NULL, 83138290Sphk .vop_fsync = VOP_NULL, 84138290Sphk .vop_getpages = vop_stdgetpages, 85138290Sphk .vop_getwritemount = vop_stdgetwritemount, 86143494Sjeff .vop_inactive = VOP_NULL, 87138290Sphk .vop_ioctl = VOP_ENOTTY, 88147198Sssouhlal .vop_kqfilter = vop_stdkqfilter, 89138290Sphk .vop_islocked = vop_stdislocked, 90138290Sphk .vop_lease = VOP_NULL, 91169671Skib .vop_lock1 = vop_stdlock, 92138290Sphk .vop_lookup = vop_nolookup, 93138290Sphk .vop_open = VOP_NULL, 94138290Sphk .vop_pathconf = VOP_EINVAL, 95138290Sphk .vop_poll = vop_nopoll, 96138290Sphk .vop_putpages = vop_stdputpages, 97138290Sphk .vop_readlink = VOP_EINVAL, 98138290Sphk .vop_revoke = VOP_PANIC, 99138290Sphk .vop_strategy = vop_nostrategy, 100138290Sphk .vop_unlock = vop_stdunlock, 101166774Spjd .vop_vptofh = vop_stdvptofh, 10230489Sphk}; 10330489Sphk 10491690Seivind/* 10591690Seivind * Series of placeholder functions for various error returns for 10691690Seivind * VOPs. 
10791690Seivind */ 10891690Seivind 10930489Sphkint 11030492Sphkvop_eopnotsupp(struct vop_generic_args *ap) 11130489Sphk{ 11230489Sphk /* 11330492Sphk printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name); 11430489Sphk */ 11530489Sphk 11630489Sphk return (EOPNOTSUPP); 11730489Sphk} 11830489Sphk 11930489Sphkint 12030492Sphkvop_ebadf(struct vop_generic_args *ap) 12130489Sphk{ 12230489Sphk 12330492Sphk return (EBADF); 12430492Sphk} 12530492Sphk 12630492Sphkint 12730492Sphkvop_enotty(struct vop_generic_args *ap) 12830492Sphk{ 12930492Sphk 13030492Sphk return (ENOTTY); 13130492Sphk} 13230492Sphk 13330492Sphkint 13430492Sphkvop_einval(struct vop_generic_args *ap) 13530492Sphk{ 13630492Sphk 13730492Sphk return (EINVAL); 13830492Sphk} 13930492Sphk 14030492Sphkint 14130492Sphkvop_null(struct vop_generic_args *ap) 14230492Sphk{ 14330492Sphk 14430492Sphk return (0); 14530492Sphk} 14630492Sphk 14791690Seivind/* 14891690Seivind * Helper function to panic on some bad VOPs in some filesystems. 14991690Seivind */ 15041056Speterint 15141056Spetervop_panic(struct vop_generic_args *ap) 15241056Speter{ 15341056Speter 15472594Sbde panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name); 15541056Speter} 15641056Speter 15791690Seivind/* 15891690Seivind * vop_std<something> and vop_no<something> are default functions for use by 15991690Seivind * filesystems that need the "default reasonable" implementation for a 16091690Seivind * particular operation. 16191690Seivind * 16291690Seivind * The documentation for the operations they implement exists (if it exists) 16391690Seivind * in the VOP_<SOMETHING>(9) manpage (all uppercase). 
16491690Seivind */ 16591690Seivind 16691690Seivind/* 16791690Seivind * Default vop for filesystems that do not support name lookup 16891690Seivind */ 16972594Sbdestatic int 17072594Sbdevop_nolookup(ap) 17172594Sbde struct vop_lookup_args /* { 17272594Sbde struct vnode *a_dvp; 17372594Sbde struct vnode **a_vpp; 17472594Sbde struct componentname *a_cnp; 17572594Sbde } */ *ap; 17672594Sbde{ 17772594Sbde 17872594Sbde *ap->a_vpp = NULL; 17972594Sbde return (ENOTDIR); 18072594Sbde} 18172594Sbde 18246349Salc/* 18346349Salc * vop_nostrategy: 18446349Salc * 18546349Salc * Strategy routine for VFS devices that have none. 18646349Salc * 18758934Sphk * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy 18858345Sphk * routine. Typically this is done for a BIO_READ strategy call. 189112067Skan * Typically B_INVAL is assumed to already be clear prior to a write 19058345Sphk * and should not be cleared manually unless you just made the buffer 19158934Sphk * invalid. BIO_ERROR should be cleared either way. 
19246349Salc */ 19346349Salc 19430489Sphkstatic int 19530489Sphkvop_nostrategy (struct vop_strategy_args *ap) 19630489Sphk{ 19730489Sphk printf("No strategy for buffer at %p\n", ap->a_bp); 198111842Snjl vprint("vnode", ap->a_vp); 19958934Sphk ap->a_bp->b_ioflags |= BIO_ERROR; 20030489Sphk ap->a_bp->b_error = EOPNOTSUPP; 20159249Sphk bufdone(ap->a_bp); 20230489Sphk return (EOPNOTSUPP); 20330489Sphk} 20430492Sphk 20591690Seivind/* 206178243Skib * Advisory record locking support 207178243Skib */ 208178243Skibint 209178243Skibvop_stdadvlock(struct vop_advlock_args *ap) 210178243Skib{ 211178243Skib struct vnode *vp = ap->a_vp; 212178243Skib struct thread *td = curthread; 213178243Skib struct vattr vattr; 214178243Skib int error; 215178243Skib 216178243Skib vn_lock(vp, LK_SHARED | LK_RETRY); 217178243Skib error = VOP_GETATTR(vp, &vattr, td->td_ucred, td); 218178243Skib VOP_UNLOCK(vp, 0); 219178243Skib if (error) 220178243Skib return (error); 221178243Skib 222178243Skib return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size)); 223178243Skib} 224178243Skib 225178243Skibint 226178243Skibvop_stdadvlockasync(struct vop_advlockasync_args *ap) 227178243Skib{ 228178243Skib struct vnode *vp = ap->a_vp; 229178243Skib struct thread *td = curthread; 230178243Skib struct vattr vattr; 231178243Skib int error; 232178243Skib 233178243Skib vn_lock(vp, LK_SHARED | LK_RETRY); 234178243Skib error = VOP_GETATTR(vp, &vattr, td->td_ucred, td); 235178243Skib VOP_UNLOCK(vp, 0); 236178243Skib if (error) 237178243Skib return (error); 238178243Skib 239178243Skib return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size)); 240178243Skib} 241178243Skib 242178243Skib/* 24391690Seivind * vop_stdpathconf: 244112067Skan * 24591690Seivind * Standard implementation of POSIX pathconf, to get information about limits 24691690Seivind * for a filesystem. 24791690Seivind * Override per filesystem for the case where the filesystem has smaller 24891690Seivind * limits. 
24991690Seivind */ 25030492Sphkint 25130492Sphkvop_stdpathconf(ap) 25230492Sphk struct vop_pathconf_args /* { 25330492Sphk struct vnode *a_vp; 25430492Sphk int a_name; 25530492Sphk int *a_retval; 25630492Sphk } */ *ap; 25730492Sphk{ 25830492Sphk 25930492Sphk switch (ap->a_name) { 260149175Sphk case _PC_NAME_MAX: 261149175Sphk *ap->a_retval = NAME_MAX; 262149175Sphk return (0); 263149175Sphk case _PC_PATH_MAX: 264149175Sphk *ap->a_retval = PATH_MAX; 265149175Sphk return (0); 26630492Sphk case _PC_LINK_MAX: 26730492Sphk *ap->a_retval = LINK_MAX; 26830492Sphk return (0); 26930492Sphk case _PC_MAX_CANON: 27030492Sphk *ap->a_retval = MAX_CANON; 27130492Sphk return (0); 27230492Sphk case _PC_MAX_INPUT: 27330492Sphk *ap->a_retval = MAX_INPUT; 27430492Sphk return (0); 27530492Sphk case _PC_PIPE_BUF: 27630492Sphk *ap->a_retval = PIPE_BUF; 27730492Sphk return (0); 27830492Sphk case _PC_CHOWN_RESTRICTED: 27930492Sphk *ap->a_retval = 1; 28030492Sphk return (0); 28130492Sphk case _PC_VDISABLE: 28230492Sphk *ap->a_retval = _POSIX_VDISABLE; 28330492Sphk return (0); 28430492Sphk default: 28530492Sphk return (EINVAL); 28630492Sphk } 28730492Sphk /* NOTREACHED */ 28830492Sphk} 28930513Sphk 29030513Sphk/* 29130513Sphk * Standard lock, unlock and islocked functions. 29230513Sphk */ 29330513Sphkint 29430513Sphkvop_stdlock(ap) 295169671Skib struct vop_lock1_args /* { 29630513Sphk struct vnode *a_vp; 29730513Sphk int a_flags; 298164248Skmacy char *file; 299164248Skmacy int line; 30030513Sphk } */ *ap; 301112067Skan{ 30266355Sbp struct vnode *vp = ap->a_vp; 30330513Sphk 304176320Sattilio return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp), 305176320Sattilio LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file, 306175635Sattilio ap->a_line)); 30730513Sphk} 30830513Sphk 30991690Seivind/* See above. 
*/ 31030513Sphkint 31130513Sphkvop_stdunlock(ap) 31230513Sphk struct vop_unlock_args /* { 31330513Sphk struct vnode *a_vp; 31430513Sphk int a_flags; 31530513Sphk } */ *ap; 31630513Sphk{ 31766355Sbp struct vnode *vp = ap->a_vp; 31830513Sphk 319175635Sattilio return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp))); 32030513Sphk} 32130513Sphk 32291690Seivind/* See above. */ 32330513Sphkint 32430513Sphkvop_stdislocked(ap) 32530513Sphk struct vop_islocked_args /* { 32630513Sphk struct vnode *a_vp; 32730513Sphk } */ *ap; 32830513Sphk{ 32930513Sphk 330176559Sattilio return (lockstatus(ap->a_vp->v_vnlock)); 33130513Sphk} 33230513Sphk 33330743Sphk/* 33430743Sphk * Return true for select/poll. 33530743Sphk */ 33630743Sphkint 33730743Sphkvop_nopoll(ap) 33830743Sphk struct vop_poll_args /* { 33930743Sphk struct vnode *a_vp; 34030743Sphk int a_events; 34130743Sphk struct ucred *a_cred; 34283366Sjulian struct thread *a_td; 34330743Sphk } */ *ap; 34430743Sphk{ 34530743Sphk /* 34631727Swollman * Return true for read/write. If the user asked for something 34731727Swollman * special, return POLLNVAL, so that clients have a way of 34831727Swollman * determining reliably whether or not the extended 34931727Swollman * functionality is present without hard-coding knowledge 35031727Swollman * of specific filesystem implementations. 351120514Sphk * Stay in sync with kern_conf.c::no_poll(). 35230743Sphk */ 35331727Swollman if (ap->a_events & ~POLLSTANDARD) 35431727Swollman return (POLLNVAL); 35531727Swollman 35630743Sphk return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 35730743Sphk} 35830743Sphk 35931727Swollman/* 36031727Swollman * Implement poll for local filesystems that support it. 
36131727Swollman */ 36230743Sphkint 36331727Swollmanvop_stdpoll(ap) 36431727Swollman struct vop_poll_args /* { 36531727Swollman struct vnode *a_vp; 36631727Swollman int a_events; 36731727Swollman struct ucred *a_cred; 36883366Sjulian struct thread *a_td; 36931727Swollman } */ *ap; 37031727Swollman{ 37176578Sjlemon if (ap->a_events & ~POLLSTANDARD) 37283366Sjulian return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events)); 37376578Sjlemon return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)); 37431727Swollman} 37531727Swollman 37630743Sphk/* 37762976Smckusick * Return our mount point, as we will take charge of the writes. 37862976Smckusick */ 37962976Smckusickint 38062976Smckusickvop_stdgetwritemount(ap) 38162976Smckusick struct vop_getwritemount_args /* { 38262976Smckusick struct vnode *a_vp; 38362976Smckusick struct mount **a_mpp; 38462976Smckusick } */ *ap; 38562976Smckusick{ 386157323Sjeff struct mount *mp; 38762976Smckusick 388157323Sjeff /* 389157323Sjeff * XXX Since this is called unlocked we may be recycled while 390157323Sjeff * attempting to ref the mount. If this is the case or mountpoint 391157323Sjeff * will be set to NULL. We only have to prevent this call from 392157323Sjeff * returning with a ref to an incorrect mountpoint. It is not 393157323Sjeff * harmful to return with a ref to our previous mountpoint. 
394157323Sjeff */ 395157323Sjeff mp = ap->a_vp->v_mount; 396162455Stegge if (mp != NULL) { 397162455Stegge vfs_ref(mp); 398162455Stegge if (mp != ap->a_vp->v_mount) { 399162455Stegge vfs_rel(mp); 400162455Stegge mp = NULL; 401162455Stegge } 402157323Sjeff } 403157323Sjeff *(ap->a_mpp) = mp; 40462976Smckusick return (0); 40562976Smckusick} 40662976Smckusick 40791690Seivind/* XXX Needs good comment and VOP_BMAP(9) manpage */ 40876131Sphkint 40976131Sphkvop_stdbmap(ap) 410112067Skan struct vop_bmap_args /* { 41176131Sphk struct vnode *a_vp; 41276131Sphk daddr_t a_bn; 413137726Sphk struct bufobj **a_bop; 41476131Sphk daddr_t *a_bnp; 41576131Sphk int *a_runp; 41676131Sphk int *a_runb; 41776131Sphk } */ *ap; 41876131Sphk{ 41976131Sphk 420137726Sphk if (ap->a_bop != NULL) 421137726Sphk *ap->a_bop = &ap->a_vp->v_bufobj; 42276131Sphk if (ap->a_bnp != NULL) 42376131Sphk *ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize); 42476131Sphk if (ap->a_runp != NULL) 42576131Sphk *ap->a_runp = 0; 42676131Sphk if (ap->a_runb != NULL) 42776131Sphk *ap->a_runb = 0; 42876131Sphk return (0); 42976131Sphk} 43076131Sphk 431110584Sjeffint 432110584Sjeffvop_stdfsync(ap) 433110584Sjeff struct vop_fsync_args /* { 434110584Sjeff struct vnode *a_vp; 435110584Sjeff struct ucred *a_cred; 436110584Sjeff int a_waitfor; 437110584Sjeff struct thread *a_td; 438110584Sjeff } */ *ap; 439110584Sjeff{ 440110584Sjeff struct vnode *vp = ap->a_vp; 441110584Sjeff struct buf *bp; 442136751Sphk struct bufobj *bo; 443110584Sjeff struct buf *nbp; 444145732Sjeff int error = 0; 445144584Sjeff int maxretry = 1000; /* large, arbitrarily chosen */ 446110584Sjeff 447177493Sjeff bo = &vp->v_bufobj; 448177493Sjeff BO_LOCK(bo); 449110584Sjeffloop1: 450110584Sjeff /* 451110584Sjeff * MARK/SCAN initialization to avoid infinite loops. 
452110584Sjeff */ 453177493Sjeff TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 454110584Sjeff bp->b_vflags &= ~BV_SCANNED; 455110584Sjeff bp->b_error = 0; 456110584Sjeff } 457110584Sjeff 458110584Sjeff /* 459144584Sjeff * Flush all dirty buffers associated with a vnode. 460110584Sjeff */ 461110584Sjeffloop2: 462177493Sjeff TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 463110584Sjeff if ((bp->b_vflags & BV_SCANNED) != 0) 464110584Sjeff continue; 465110584Sjeff bp->b_vflags |= BV_SCANNED; 466111463Sjeff if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) 467110584Sjeff continue; 468177493Sjeff BO_UNLOCK(bo); 469177493Sjeff KASSERT(bp->b_bufobj == bo, 470147388Sjeff ("bp %p wrong b_bufobj %p should be %p", 471177493Sjeff bp, bp->b_bufobj, bo)); 472110584Sjeff if ((bp->b_flags & B_DELWRI) == 0) 473110588Sjeff panic("fsync: not dirty"); 474140734Sphk if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) { 475110584Sjeff vfs_bio_awrite(bp); 476110584Sjeff } else { 477110584Sjeff bremfree(bp); 478110584Sjeff bawrite(bp); 479110584Sjeff } 480177493Sjeff BO_LOCK(bo); 481110584Sjeff goto loop2; 482110584Sjeff } 483110584Sjeff 484110584Sjeff /* 485110584Sjeff * If synchronous the caller expects us to completely resolve all 486110584Sjeff * dirty buffers in the system. Wait for in-progress I/O to 487110584Sjeff * complete (which could include background bitmap writes), then 488110584Sjeff * retry if dirty blocks still exist. 489110584Sjeff */ 490110584Sjeff if (ap->a_waitfor == MNT_WAIT) { 491136751Sphk bufobj_wwait(bo, 0, 0); 492136751Sphk if (bo->bo_dirty.bv_cnt > 0) { 493110584Sjeff /* 494110584Sjeff * If we are unable to write any of these buffers 495110584Sjeff * then we fail now rather than trying endlessly 496110584Sjeff * to write them out. 
497110584Sjeff */ 498136751Sphk TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 499110584Sjeff if ((error = bp->b_error) == 0) 500110584Sjeff continue; 501145732Sjeff if (error == 0 && --maxretry >= 0) 502110584Sjeff goto loop1; 503110584Sjeff error = EAGAIN; 504110584Sjeff } 505110584Sjeff } 506177493Sjeff BO_UNLOCK(bo); 507144584Sjeff if (error == EAGAIN) 508144584Sjeff vprint("fsync: giving up on dirty", vp); 509112067Skan 510110584Sjeff return (error); 511110584Sjeff} 512112067Skan 51391690Seivind/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */ 51476167Sphkint 51576167Sphkvop_stdgetpages(ap) 51676167Sphk struct vop_getpages_args /* { 51776167Sphk struct vnode *a_vp; 51876167Sphk vm_page_t *a_m; 51976167Sphk int a_count; 52076167Sphk int a_reqpage; 52176167Sphk vm_ooffset_t a_offset; 52276167Sphk } */ *ap; 52376167Sphk{ 52476131Sphk 52576167Sphk return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, 52676167Sphk ap->a_count, ap->a_reqpage); 52776167Sphk} 52876167Sphk 529147198Sssouhlalint 530147198Sssouhlalvop_stdkqfilter(struct vop_kqfilter_args *ap) 531147198Sssouhlal{ 532147198Sssouhlal return vfs_kqfilter(ap); 533147198Sssouhlal} 534147198Sssouhlal 53591690Seivind/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). 
*/ 53676319Sphkint 53776167Sphkvop_stdputpages(ap) 53876167Sphk struct vop_putpages_args /* { 53976167Sphk struct vnode *a_vp; 54076167Sphk vm_page_t *a_m; 54176167Sphk int a_count; 54276167Sphk int a_sync; 54376167Sphk int *a_rtvals; 54476167Sphk vm_ooffset_t a_offset; 54576167Sphk } */ *ap; 54676167Sphk{ 54776167Sphk 54876319Sphk return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count, 54976167Sphk ap->a_sync, ap->a_rtvals); 55076167Sphk} 55176167Sphk 552166774Spjdint 553166774Spjdvop_stdvptofh(struct vop_vptofh_args *ap) 554166774Spjd{ 555166795Spjd return (EOPNOTSUPP); 556166774Spjd} 557166774Spjd 558112067Skan/* 55951068Salfred * vfs default ops 56091690Seivind * used to fill the vfs function table to get reasonable default return values. 56151068Salfred */ 56291690Seivindint 563144054Sjeffvfs_stdroot (mp, flags, vpp, td) 56451068Salfred struct mount *mp; 565144054Sjeff int flags; 56651068Salfred struct vnode **vpp; 567132023Salfred struct thread *td; 56851068Salfred{ 569131734Salfred 57051068Salfred return (EOPNOTSUPP); 57151068Salfred} 57251068Salfred 57391690Seivindint 57483366Sjulianvfs_stdstatfs (mp, sbp, td) 57551068Salfred struct mount *mp; 57651068Salfred struct statfs *sbp; 57783366Sjulian struct thread *td; 57851068Salfred{ 579131734Salfred 58051068Salfred return (EOPNOTSUPP); 58151068Salfred} 58251068Salfred 58351068Salfredint 58483366Sjulianvfs_stdquotactl (mp, cmds, uid, arg, td) 58551068Salfred struct mount *mp; 58651068Salfred int cmds; 58751068Salfred uid_t uid; 588153400Sdes void *arg; 58983366Sjulian struct thread *td; 59051068Salfred{ 591131734Salfred 59251068Salfred return (EOPNOTSUPP); 59351068Salfred} 59451068Salfred 595112067Skanint 596140048Sphkvfs_stdsync(mp, waitfor, td) 59751068Salfred struct mount *mp; 59851068Salfred int waitfor; 59983366Sjulian struct thread *td; 60051068Salfred{ 601154152Stegge struct vnode *vp, *mvp; 602112119Skan int error, lockreq, allerror = 0; 603112119Skan 604112119Skan lockreq = LK_EXCLUSIVE | 
LK_INTERLOCK; 605112119Skan if (waitfor != MNT_WAIT) 606112119Skan lockreq |= LK_NOWAIT; 607112119Skan /* 608112119Skan * Force stale buffer cache information to be flushed. 609112119Skan */ 610122091Skan MNT_ILOCK(mp); 611112119Skanloop: 612154152Stegge MNT_VNODE_FOREACH(vp, mp, mvp) { 613177493Sjeff /* bv_cnt is an acceptable race here. */ 614177493Sjeff if (vp->v_bufobj.bo_dirty.bv_cnt == 0) 615177493Sjeff continue; 616112119Skan VI_LOCK(vp); 617122091Skan MNT_IUNLOCK(mp); 618112119Skan if ((error = vget(vp, lockreq, td)) != 0) { 619122091Skan MNT_ILOCK(mp); 620154152Stegge if (error == ENOENT) { 621154152Stegge MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp); 622112119Skan goto loop; 623154152Stegge } 624112119Skan continue; 625112119Skan } 626140048Sphk error = VOP_FSYNC(vp, waitfor, td); 627112119Skan if (error) 628112119Skan allerror = error; 629112119Skan 630155032Sjeff /* Do not turn this into vput. td is not always curthread. */ 631175294Sattilio VOP_UNLOCK(vp, 0); 632121874Skan vrele(vp); 633122091Skan MNT_ILOCK(mp); 634112119Skan } 635122091Skan MNT_IUNLOCK(mp); 636112119Skan return (allerror); 637112119Skan} 638112119Skan 639112119Skanint 640140048Sphkvfs_stdnosync (mp, waitfor, td) 641112119Skan struct mount *mp; 642112119Skan int waitfor; 643112119Skan struct thread *td; 644112119Skan{ 645131734Salfred 64651068Salfred return (0); 64751068Salfred} 64851068Salfred 649112067Skanint 65092462Smckusickvfs_stdvget (mp, ino, flags, vpp) 65151068Salfred struct mount *mp; 65251068Salfred ino_t ino; 65392462Smckusick int flags; 65451068Salfred struct vnode **vpp; 65551068Salfred{ 656131734Salfred 65751068Salfred return (EOPNOTSUPP); 65851068Salfred} 65951068Salfred 660112067Skanint 66151138Salfredvfs_stdfhtovp (mp, fhp, vpp) 66251068Salfred struct mount *mp; 66351068Salfred struct fid *fhp; 66451138Salfred struct vnode **vpp; 66551138Salfred{ 666131734Salfred 66751138Salfred return (EOPNOTSUPP); 66851138Salfred} 66951138Salfred 67051068Salfredint 
671112067Skanvfs_stdinit (vfsp) 67251068Salfred struct vfsconf *vfsp; 67351068Salfred{ 674131734Salfred 67551068Salfred return (0); 67651068Salfred} 67751068Salfred 67851068Salfredint 67951068Salfredvfs_stduninit (vfsp) 68051068Salfred struct vfsconf *vfsp; 68151068Salfred{ 682131734Salfred 68351068Salfred return(0); 68451068Salfred} 68551068Salfred 68654803Srwatsonint 68783366Sjulianvfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td) 68854803Srwatson struct mount *mp; 68954803Srwatson int cmd; 69074273Srwatson struct vnode *filename_vp; 69174437Srwatson int attrnamespace; 69256272Srwatson const char *attrname; 69383366Sjulian struct thread *td; 69454803Srwatson{ 695131734Salfred 696101786Sphk if (filename_vp != NULL) 697175294Sattilio VOP_UNLOCK(filename_vp, 0); 698131734Salfred return (EOPNOTSUPP); 69954803Srwatson} 70054803Srwatson 701131733Salfredint 702131733Salfredvfs_stdsysctl(mp, op, req) 703131733Salfred struct mount *mp; 704131733Salfred fsctlop_t op; 705131733Salfred struct sysctl_req *req; 706131733Salfred{ 707131733Salfred 708131733Salfred return (EOPNOTSUPP); 709131733Salfred} 710131733Salfred 71151068Salfred/* end of vfs default ops */ 712