/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2630489Sphk * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2730489Sphk * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2830489Sphk * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2930489Sphk * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 3030489Sphk * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3130489Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3230489Sphk * SUCH DAMAGE. 3330489Sphk */ 3430489Sphk 35116182Sobrien#include <sys/cdefs.h> 36116182Sobrien__FBSDID("$FreeBSD: stable/11/sys/kern/vfs_default.c 350126 2019-07-19 14:24:33Z asomers $"); 37116182Sobrien 3830489Sphk#include <sys/param.h> 3930489Sphk#include <sys/systm.h> 4060041Sphk#include <sys/bio.h> 4144272Sbde#include <sys/buf.h> 4265770Sbp#include <sys/conf.h> 43147198Sssouhlal#include <sys/event.h> 4430489Sphk#include <sys/kernel.h> 45114216Skan#include <sys/limits.h> 4631561Sbde#include <sys/lock.h> 47178243Skib#include <sys/lockf.h> 4830743Sphk#include <sys/malloc.h> 4951068Salfred#include <sys/mount.h> 50189539Smarcus#include <sys/namei.h> 51248084Sattilio#include <sys/rwlock.h> 52189539Smarcus#include <sys/fcntl.h> 5330492Sphk#include <sys/unistd.h> 5430489Sphk#include <sys/vnode.h> 55189539Smarcus#include <sys/dirent.h> 5630743Sphk#include <sys/poll.h> 5730489Sphk 58193508Srwatson#include <security/mac/mac_framework.h> 59193508Srwatson 6065770Sbp#include <vm/vm.h> 6165770Sbp#include <vm/vm_object.h> 6265770Sbp#include <vm/vm_extern.h> 6365770Sbp#include <vm/pmap.h> 6465770Sbp#include <vm/vm_map.h> 6565770Sbp#include <vm/vm_page.h> 6665770Sbp#include <vm/vm_pager.h> 6765770Sbp#include <vm/vnode_pager.h> 6865770Sbp 6992723Salfredstatic int vop_nolookup(struct vop_lookup_args *); 70206094Skibstatic int vop_norename(struct vop_rename_args *); 7192723Salfredstatic int 
vop_nostrategy(struct vop_strategy_args *); 72189539Smarcusstatic int get_next_dirent(struct vnode *vp, struct dirent **dpp, 73189539Smarcus char *dirbuf, int dirbuflen, off_t *off, 74189539Smarcus char **cpos, int *len, int *eofflag, 75189539Smarcus struct thread *td); 76189539Smarcusstatic int dirent_exists(struct vnode *vp, const char *dirname, 77189539Smarcus struct thread *td); 7830489Sphk 79189539Smarcus#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4) 80189539Smarcus 81241025Skibstatic int vop_stdis_text(struct vop_is_text_args *ap); 82241025Skibstatic int vop_stdset_text(struct vop_set_text_args *ap); 83241025Skibstatic int vop_stdunset_text(struct vop_unset_text_args *ap); 84242476Skibstatic int vop_stdget_writecount(struct vop_get_writecount_args *ap); 85242476Skibstatic int vop_stdadd_writecount(struct vop_add_writecount_args *ap); 86304977Skibstatic int vop_stdfdatasync(struct vop_fdatasync_args *ap); 87274914Sglebiusstatic int vop_stdgetpages_async(struct vop_getpages_async_args *ap); 88241025Skib 8930489Sphk/* 9030489Sphk * This vnode table stores what we want to do if the filesystem doesn't 9130489Sphk * implement a particular VOP. 9230489Sphk * 9330489Sphk * If there is no specific entry here, we will return EOPNOTSUPP. 9430489Sphk * 95197680Strasz * Note that every filesystem has to implement either vop_access 96197680Strasz * or vop_accessx; failing to do so will result in immediate crash 97197680Strasz * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(), 98197680Strasz * which calls vop_stdaccess() etc. 
 */

/*
 * The fallback VOP table.  A filesystem's vop_vector whose vop_default
 * points here inherits these implementations for any operation it does
 * not supply itself; operations with no entry fall through to
 * vop_bypass, i.e. VOP_EOPNOTSUPP.
 */
struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount = 	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_get_writecount =	vop_stdget_writecount,
	.vop_add_writecount =	vop_stdadd_writecount,
};

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

/* Unconditionally fail with EOPNOTSUPP ("operation not supported"). */
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

/* Unconditionally fail with EBADF. */
int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

/* Unconditionally fail with ENOTTY (used e.g. for vop_ioctl). */
int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

/* Unconditionally fail with EINVAL. */
int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

/* Unconditionally fail with ENOENT. */
int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

/* No-op VOP: succeed without doing anything. */
int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * The documentation for the operations they implement exists (if it exists)
 * in the VOP_<SOMETHING>(9) manpage (all uppercase).
21391690Seivind */ 21491690Seivind 21591690Seivind/* 21691690Seivind * Default vop for filesystems that do not support name lookup 21791690Seivind */ 21872594Sbdestatic int 21972594Sbdevop_nolookup(ap) 22072594Sbde struct vop_lookup_args /* { 22172594Sbde struct vnode *a_dvp; 22272594Sbde struct vnode **a_vpp; 22372594Sbde struct componentname *a_cnp; 22472594Sbde } */ *ap; 22572594Sbde{ 22672594Sbde 22772594Sbde *ap->a_vpp = NULL; 22872594Sbde return (ENOTDIR); 22972594Sbde} 23072594Sbde 23146349Salc/* 232206094Skib * vop_norename: 233206094Skib * 234206094Skib * Handle unlock and reference counting for arguments of vop_rename 235206094Skib * for filesystems that do not implement rename operation. 236206094Skib */ 237206094Skibstatic int 238206094Skibvop_norename(struct vop_rename_args *ap) 239206094Skib{ 240206094Skib 241206094Skib vop_rename_fail(ap); 242206094Skib return (EOPNOTSUPP); 243206094Skib} 244206094Skib 245206094Skib/* 24646349Salc * vop_nostrategy: 24746349Salc * 24846349Salc * Strategy routine for VFS devices that have none. 24946349Salc * 25058934Sphk * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy 25158345Sphk * routine. Typically this is done for a BIO_READ strategy call. 252112067Skan * Typically B_INVAL is assumed to already be clear prior to a write 25358345Sphk * and should not be cleared manually unless you just made the buffer 25458934Sphk * invalid. BIO_ERROR should be cleared either way. 
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	/* Complain loudly; this should never be reached in a correct FS. */
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	/* Mark the buffer failed and complete it so waiters are released. */
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * Iterate over the entries of a directory vnode.
 *
 * On each call returns the next struct dirent through *dpp.  When the
 * caller-supplied window (*cpos/*len into dirbuf) is exhausted
 * (*len == 0), refills dirbuf with a fresh VOP_READDIR starting at
 * *off.  The caller must initialize *off and *len to 0 before the
 * first call and keep vp locked throughout.
 *
 * Returns 0 on success, ENOENT when the directory is exhausted, EINVAL
 * on a malformed (undersized) record, or a VOP_READDIR/MAC error.
 */
static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		/* Window empty: read the next chunk of the directory. */
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory.. */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	/* Advance the window past the record just returned. */
	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 *
 * Returns 1 if an entry named dirname (not a whiteout, nonzero fileno)
 * is found, 0 otherwise.  NOTE(review): read errors (including the
 * VOP_GETATTR failure below) are also reported as 0 / "not found" —
 * callers cannot distinguish error from absence.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	/* Size the scratch buffer to at least one FS block. */
	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

/*
 * Default VOP_ACCESS: forward to VOP_ACCESSX.  A filesystem must
 * implement at least one of the pair (see the warning at the top of
 * this file); implementing neither recurses until the stack overflows.
 */
int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

/*
 * Default VOP_ACCESSX: reduce the extended accmode to the classic
 * unix bits and forward to VOP_ACCESS (the mirror of vop_stdaccess).
 */
int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

/* Async variant of vop_stdadvlock; same SEEK_END size handling. */
int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

/* Discard all advisory locks on the vnode (e.g. at forced unmount). */
int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
46691690Seivind */ 46730492Sphkint 46830492Sphkvop_stdpathconf(ap) 46930492Sphk struct vop_pathconf_args /* { 47030492Sphk struct vnode *a_vp; 47130492Sphk int a_name; 47230492Sphk int *a_retval; 47330492Sphk } */ *ap; 47430492Sphk{ 47530492Sphk 47630492Sphk switch (ap->a_name) { 477296572Sjhb case _PC_ASYNC_IO: 478296572Sjhb *ap->a_retval = _POSIX_ASYNCHRONOUS_IO; 479296572Sjhb return (0); 480149175Sphk case _PC_PATH_MAX: 481149175Sphk *ap->a_retval = PATH_MAX; 482149175Sphk return (0); 483346032Ssjg case _PC_ACL_EXTENDED: 484346032Ssjg case _PC_ACL_NFS4: 485346032Ssjg case _PC_CAP_PRESENT: 486346032Ssjg case _PC_INF_PRESENT: 487346032Ssjg case _PC_MAC_PRESENT: 488346032Ssjg *ap->a_retval = 0; 489346032Ssjg return (0); 49030492Sphk default: 49130492Sphk return (EINVAL); 49230492Sphk } 49330492Sphk /* NOTREACHED */ 49430492Sphk} 49530513Sphk 49630513Sphk/* 49730513Sphk * Standard lock, unlock and islocked functions. 49830513Sphk */ 49930513Sphkint 50030513Sphkvop_stdlock(ap) 501169671Skib struct vop_lock1_args /* { 50230513Sphk struct vnode *a_vp; 50330513Sphk int a_flags; 504164248Skmacy char *file; 505164248Skmacy int line; 50630513Sphk } */ *ap; 507112067Skan{ 50866355Sbp struct vnode *vp = ap->a_vp; 509315375Smjg struct mtx *ilk; 51030513Sphk 511315375Smjg ilk = VI_MTX(vp); 512315375Smjg return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags, 513315375Smjg (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line)); 51430513Sphk} 51530513Sphk 51691690Seivind/* See above. */ 51730513Sphkint 51830513Sphkvop_stdunlock(ap) 51930513Sphk struct vop_unlock_args /* { 52030513Sphk struct vnode *a_vp; 52130513Sphk int a_flags; 52230513Sphk } */ *ap; 52330513Sphk{ 52466355Sbp struct vnode *vp = ap->a_vp; 525315375Smjg struct mtx *ilk; 52630513Sphk 527315375Smjg ilk = VI_MTX(vp); 528315375Smjg return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags, 529315375Smjg (ilk != NULL) ? &ilk->lock_object : NULL)); 53030513Sphk} 53130513Sphk 53291690Seivind/* See above. 
 */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	/* Report the lockmgr state of the vnode's lock. */
	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/* Non-standard events must be recorded for later wakeup. */
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	/* Local files are always readable/writable. */
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case or mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		/* Re-check: drop the ref if v_mount changed under us. */
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/*
 * If the file system doesn't implement VOP_BMAP, then return sensible defaults:
 * - Return the vnode's bufobj instead of any underlying device's bufobj
 * - Calculate the physical block number as if there were equal size
 *   consecutive blocks, but
 * - Report no contiguous runs of blocks.
 */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

/*
 * Default VOP_FSYNC for buffer-cache based filesystems: flush every
 * dirty buffer on the vnode, and for MNT_WAIT retry (bounded by
 * maxretry) until the dirty list drains or an error sticks.
 */
int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp;
	struct buf *bp, *nbp;
	struct bufobj *bo;
	struct mount *mp;
	int error, maxretry;

	error = 0;
	maxretry = 10000;     /* large, arbitrarily chosen */
	vp = ap->a_vp;
	mp = NULL;
	if (vp->v_type == VCHR) {
		VI_LOCK(vp);
		mp = vp->v_rdev->si_mountpt;
		VI_UNLOCK(vp);
	}
	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			/* Sleep for the lock; restart the scan if we lost. */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_LOCKPTR(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		/* Throttle once retries start running low. */
		if (maxretry < 1000)
			pause("dirty", hz < 1000 ? 1 : hz / 1000);
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if ((mp != NULL && mp->mnt_secondary_writes > 0) ||
			    (error == 0 && --maxretry >= 0))
				goto loop1;
			if (error == 0)
				error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error != 0)
		vn_printf(vp, "fsync: giving up on dirty (error = %d) ", error);

	return (error);
}

/* Default VOP_FDATASYNC: fall back to a full synchronous fsync. */
static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

/*
 * VOP_FDATASYNC for buffer-cache filesystems: reuse vop_stdfsync
 * with MNT_WAIT semantics.
 */
int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{
	struct vop_fsync_args apf;

	apf.a_vp = ap->a_vp;
	apf.a_waitfor = MNT_WAIT;
	apf.a_td = ap->a_td;
	return (vop_stdfsync(&apf));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

/*
 * Default async getpages: run the synchronous VOP_GETPAGES and then
 * invoke the caller's completion callback with the result.
 */
static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

/* Default VOP_KQFILTER: generic VFS kqueue filter attachment. */
int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)).
 */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/* Default VOP_VPTOFH: file-handle export is not supported. */
int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

/*
 * Default VOP_VPTOCNP: map a directory vnode back to its name.
 *
 * Opens ".." relative to vp, scans the parent with VOP_READDIR for an
 * entry whose fileno matches vp, and on success prepends the name into
 * buf (writing at buf + *buflen - namelen and shrinking *buflen) while
 * returning a referenced parent in *a_vpp.  Handles MNT_UNION mounts
 * by preferring the covered vnode unless the name exists there too.
 * vp's lock is dropped during the scan and re-taken (at its previous
 * level) before returning.
 */
int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	/* Only directories can be mapped back to a name this way. */
	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	/* Drop vp's lock while we open and scan "..". */
	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	/* Union mount at the FS root: switch to the covered vnode. */
	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
		    &cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				/*
				 * The name must not be shadowed by an
				 * entry in the union's upper layer.
				 */
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			/* Prepend the name into the tail of buf. */
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	/* Restore vp's lock at its original level. */
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}

/*
 * Default VOP_ALLOCATE.  (Definition continues beyond this chunk.)
 */
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
iosize = BLKDEV_IOSIZE; 970220791Smdf if (iosize > MAXPHYS) 971220791Smdf iosize = MAXPHYS; 972220791Smdf buf = malloc(iosize, M_TEMP, M_WAITOK); 973220791Smdf 974220791Smdf#ifdef __notyet__ 975220791Smdf /* 976220791Smdf * Check if the filesystem sets f_maxfilesize; if not use 977220791Smdf * VOP_SETATTR to perform the check. 978220791Smdf */ 979311957Skib sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK); 980311957Skib error = VFS_STATFS(vp->v_mount, sfs, td); 981311957Skib if (error == 0) 982311957Skib maxfilesize = sfs->f_maxfilesize; 983311957Skib free(sfs, M_STATFS); 984220791Smdf if (error != 0) 985220791Smdf goto out; 986311957Skib if (maxfilesize) { 987311957Skib if (offset > maxfilesize || len > maxfilesize || 988311957Skib offset + len > maxfilesize) { 989220791Smdf error = EFBIG; 990220791Smdf goto out; 991220791Smdf } 992220791Smdf } else 993220791Smdf#endif 994220791Smdf if (offset + len > vap->va_size) { 995220846Smdf /* 996220846Smdf * Test offset + len against the filesystem's maxfilesize. 997220846Smdf */ 998220791Smdf VATTR_NULL(vap); 999220791Smdf vap->va_size = offset + len; 1000220791Smdf error = VOP_SETATTR(vp, vap, td->td_ucred); 1001220791Smdf if (error != 0) 1002220791Smdf goto out; 1003220846Smdf VATTR_NULL(vap); 1004220846Smdf vap->va_size = fsize; 1005220846Smdf error = VOP_SETATTR(vp, vap, td->td_ucred); 1006220846Smdf if (error != 0) 1007220846Smdf goto out; 1008220791Smdf } 1009220791Smdf 1010220846Smdf for (;;) { 1011220791Smdf /* 1012220791Smdf * Read and write back anything below the nominal file 1013220791Smdf * size. There's currently no way outside the filesystem 1014220791Smdf * to know whether this area is sparse or not. 
1015220791Smdf */ 1016220791Smdf cur = iosize; 1017220791Smdf if ((offset % iosize) != 0) 1018220791Smdf cur -= (offset % iosize); 1019220791Smdf if (cur > len) 1020220791Smdf cur = len; 1021220846Smdf if (offset < fsize) { 1022220791Smdf aiov.iov_base = buf; 1023220791Smdf aiov.iov_len = cur; 1024220791Smdf auio.uio_iov = &aiov; 1025220791Smdf auio.uio_iovcnt = 1; 1026220791Smdf auio.uio_offset = offset; 1027220791Smdf auio.uio_resid = cur; 1028220791Smdf auio.uio_segflg = UIO_SYSSPACE; 1029220791Smdf auio.uio_rw = UIO_READ; 1030220791Smdf auio.uio_td = td; 1031220791Smdf error = VOP_READ(vp, &auio, 0, td->td_ucred); 1032220791Smdf if (error != 0) 1033220791Smdf break; 1034220791Smdf if (auio.uio_resid > 0) { 1035220791Smdf bzero(buf + cur - auio.uio_resid, 1036220791Smdf auio.uio_resid); 1037220791Smdf } 1038220791Smdf } else { 1039220791Smdf bzero(buf, cur); 1040220791Smdf } 1041220791Smdf 1042220791Smdf aiov.iov_base = buf; 1043220791Smdf aiov.iov_len = cur; 1044220791Smdf auio.uio_iov = &aiov; 1045220791Smdf auio.uio_iovcnt = 1; 1046220791Smdf auio.uio_offset = offset; 1047220791Smdf auio.uio_resid = cur; 1048220791Smdf auio.uio_segflg = UIO_SYSSPACE; 1049220791Smdf auio.uio_rw = UIO_WRITE; 1050220791Smdf auio.uio_td = td; 1051220791Smdf 1052220791Smdf error = VOP_WRITE(vp, &auio, 0, td->td_ucred); 1053220791Smdf if (error != 0) 1054220791Smdf break; 1055220791Smdf 1056220791Smdf len -= cur; 1057220791Smdf offset += cur; 1058220846Smdf if (len == 0) 1059220846Smdf break; 1060220846Smdf if (should_yield()) 1061220846Smdf break; 1062220791Smdf } 1063220791Smdf 1064220791Smdf out: 1065220846Smdf *ap->a_len = len; 1066220846Smdf *ap->a_offset = offset; 1067220791Smdf free(buf, M_TEMP); 1068220791Smdf return (error); 1069220791Smdf} 1070220791Smdf 1071227070Sjhbint 1072227070Sjhbvop_stdadvise(struct vop_advise_args *ap) 1073227070Sjhb{ 1074227070Sjhb struct vnode *vp; 1075292326Skib struct bufobj *bo; 1076292326Skib daddr_t startn, endn; 1077227070Sjhb off_t start, 
end; 1078288431Smarkj int bsize, error; 1079227070Sjhb 1080227070Sjhb vp = ap->a_vp; 1081227070Sjhb switch (ap->a_advice) { 1082227070Sjhb case POSIX_FADV_WILLNEED: 1083227070Sjhb /* 1084227070Sjhb * Do nothing for now. Filesystems should provide a 1085227070Sjhb * custom method which starts an asynchronous read of 1086227070Sjhb * the requested region. 1087227070Sjhb */ 1088227070Sjhb error = 0; 1089227070Sjhb break; 1090227070Sjhb case POSIX_FADV_DONTNEED: 1091227070Sjhb error = 0; 1092227070Sjhb vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1093227070Sjhb if (vp->v_iflag & VI_DOOMED) { 1094227070Sjhb VOP_UNLOCK(vp, 0); 1095227070Sjhb break; 1096227070Sjhb } 1097288431Smarkj 1098288431Smarkj /* 1099288431Smarkj * Deactivate pages in the specified range from the backing VM 1100288431Smarkj * object. Pages that are resident in the buffer cache will 1101288431Smarkj * remain wired until their corresponding buffers are released 1102288431Smarkj * below. 1103288431Smarkj */ 1104227070Sjhb if (vp->v_object != NULL) { 1105227070Sjhb start = trunc_page(ap->a_start); 1106227070Sjhb end = round_page(ap->a_end); 1107315475Salc VM_OBJECT_RLOCK(vp->v_object); 1108288431Smarkj vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start), 1109227070Sjhb OFF_TO_IDX(end)); 1110315475Salc VM_OBJECT_RUNLOCK(vp->v_object); 1111227070Sjhb } 1112288431Smarkj 1113292326Skib bo = &vp->v_bufobj; 1114292326Skib BO_RLOCK(bo); 1115288431Smarkj bsize = vp->v_bufobj.bo_bsize; 1116288431Smarkj startn = ap->a_start / bsize; 1117292326Skib endn = ap->a_end / bsize; 1118293197Skib error = bnoreuselist(&bo->bo_clean, bo, startn, endn); 1119293197Skib if (error == 0) 1120292326Skib error = bnoreuselist(&bo->bo_dirty, bo, startn, endn); 1121292326Skib BO_RUNLOCK(bo); 1122227070Sjhb VOP_UNLOCK(vp, 0); 1123227070Sjhb break; 1124227070Sjhb default: 1125227070Sjhb error = EINVAL; 1126227070Sjhb break; 1127227070Sjhb } 1128227070Sjhb return (error); 1129227070Sjhb} 1130227070Sjhb 1131232317Strocinyint 
1132232317Strocinyvop_stdunp_bind(struct vop_unp_bind_args *ap) 1133232317Strociny{ 1134232317Strociny 1135232317Strociny ap->a_vp->v_socket = ap->a_socket; 1136232317Strociny return (0); 1137232317Strociny} 1138232317Strociny 1139232317Strocinyint 1140232317Strocinyvop_stdunp_connect(struct vop_unp_connect_args *ap) 1141232317Strociny{ 1142232317Strociny 1143232317Strociny *ap->a_socket = ap->a_vp->v_socket; 1144232317Strociny return (0); 1145232317Strociny} 1146232317Strociny 1147232317Strocinyint 1148232317Strocinyvop_stdunp_detach(struct vop_unp_detach_args *ap) 1149232317Strociny{ 1150232317Strociny 1151232317Strociny ap->a_vp->v_socket = NULL; 1152232317Strociny return (0); 1153232317Strociny} 1154232317Strociny 1155241025Skibstatic int 1156241025Skibvop_stdis_text(struct vop_is_text_args *ap) 1157241025Skib{ 1158241025Skib 1159241025Skib return ((ap->a_vp->v_vflag & VV_TEXT) != 0); 1160241025Skib} 1161241025Skib 1162241025Skibstatic int 1163241025Skibvop_stdset_text(struct vop_set_text_args *ap) 1164241025Skib{ 1165241025Skib 1166241025Skib ap->a_vp->v_vflag |= VV_TEXT; 1167241025Skib return (0); 1168241025Skib} 1169241025Skib 1170241025Skibstatic int 1171241025Skibvop_stdunset_text(struct vop_unset_text_args *ap) 1172241025Skib{ 1173241025Skib 1174241025Skib ap->a_vp->v_vflag &= ~VV_TEXT; 1175241025Skib return (0); 1176241025Skib} 1177241025Skib 1178242476Skibstatic int 1179242476Skibvop_stdget_writecount(struct vop_get_writecount_args *ap) 1180242476Skib{ 1181242476Skib 1182242476Skib *ap->a_writecount = ap->a_vp->v_writecount; 1183242476Skib return (0); 1184242476Skib} 1185242476Skib 1186242476Skibstatic int 1187242476Skibvop_stdadd_writecount(struct vop_add_writecount_args *ap) 1188242476Skib{ 1189242476Skib 1190242476Skib ap->a_vp->v_writecount += ap->a_inc; 1191242476Skib return (0); 1192242476Skib} 1193242476Skib 1194112067Skan/* 119551068Salfred * vfs default ops 119691690Seivind * used to fill the vfs function table to get reasonable default return 
values. 119751068Salfred */ 119891690Seivindint 1199191990Sattiliovfs_stdroot (mp, flags, vpp) 120051068Salfred struct mount *mp; 1201144054Sjeff int flags; 120251068Salfred struct vnode **vpp; 120351068Salfred{ 1204131734Salfred 120551068Salfred return (EOPNOTSUPP); 120651068Salfred} 120751068Salfred 120891690Seivindint 1209191990Sattiliovfs_stdstatfs (mp, sbp) 121051068Salfred struct mount *mp; 121151068Salfred struct statfs *sbp; 121251068Salfred{ 1213131734Salfred 121451068Salfred return (EOPNOTSUPP); 121551068Salfred} 121651068Salfred 121751068Salfredint 1218191990Sattiliovfs_stdquotactl (mp, cmds, uid, arg) 121951068Salfred struct mount *mp; 122051068Salfred int cmds; 122151068Salfred uid_t uid; 1222153400Sdes void *arg; 122351068Salfred{ 1224131734Salfred 122551068Salfred return (EOPNOTSUPP); 122651068Salfred} 122751068Salfred 1228112067Skanint 1229191990Sattiliovfs_stdsync(mp, waitfor) 123051068Salfred struct mount *mp; 123151068Salfred int waitfor; 123251068Salfred{ 1233154152Stegge struct vnode *vp, *mvp; 1234191990Sattilio struct thread *td; 1235112119Skan int error, lockreq, allerror = 0; 1236112119Skan 1237191990Sattilio td = curthread; 1238112119Skan lockreq = LK_EXCLUSIVE | LK_INTERLOCK; 1239112119Skan if (waitfor != MNT_WAIT) 1240112119Skan lockreq |= LK_NOWAIT; 1241112119Skan /* 1242112119Skan * Force stale buffer cache information to be flushed. 
1243112119Skan */ 1244112119Skanloop: 1245234386Smckusick MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 1246234386Smckusick if (vp->v_bufobj.bo_dirty.bv_cnt == 0) { 1247234386Smckusick VI_UNLOCK(vp); 1248177493Sjeff continue; 1249234386Smckusick } 1250112119Skan if ((error = vget(vp, lockreq, td)) != 0) { 1251154152Stegge if (error == ENOENT) { 1252234386Smckusick MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 1253112119Skan goto loop; 1254154152Stegge } 1255112119Skan continue; 1256112119Skan } 1257140048Sphk error = VOP_FSYNC(vp, waitfor, td); 1258112119Skan if (error) 1259112119Skan allerror = error; 1260204065Spjd vput(vp); 1261112119Skan } 1262112119Skan return (allerror); 1263112119Skan} 1264112119Skan 1265112119Skanint 1266191990Sattiliovfs_stdnosync (mp, waitfor) 1267112119Skan struct mount *mp; 1268112119Skan int waitfor; 1269112119Skan{ 1270131734Salfred 127151068Salfred return (0); 127251068Salfred} 127351068Salfred 1274112067Skanint 127592462Smckusickvfs_stdvget (mp, ino, flags, vpp) 127651068Salfred struct mount *mp; 127751068Salfred ino_t ino; 127892462Smckusick int flags; 127951068Salfred struct vnode **vpp; 128051068Salfred{ 1281131734Salfred 128251068Salfred return (EOPNOTSUPP); 128351068Salfred} 128451068Salfred 1285112067Skanint 1286222167Srmacklemvfs_stdfhtovp (mp, fhp, flags, vpp) 128751068Salfred struct mount *mp; 128851068Salfred struct fid *fhp; 1289222167Srmacklem int flags; 129051138Salfred struct vnode **vpp; 129151138Salfred{ 1292131734Salfred 129351138Salfred return (EOPNOTSUPP); 129451138Salfred} 129551138Salfred 129651068Salfredint 1297112067Skanvfs_stdinit (vfsp) 129851068Salfred struct vfsconf *vfsp; 129951068Salfred{ 1300131734Salfred 130151068Salfred return (0); 130251068Salfred} 130351068Salfred 130451068Salfredint 130551068Salfredvfs_stduninit (vfsp) 130651068Salfred struct vfsconf *vfsp; 130751068Salfred{ 1308131734Salfred 130951068Salfred return(0); 131051068Salfred} 131151068Salfred 131254803Srwatsonint 1313191990Sattiliovfs_stdextattrctl(mp, 
cmd, filename_vp, attrnamespace, attrname) 131454803Srwatson struct mount *mp; 131554803Srwatson int cmd; 131674273Srwatson struct vnode *filename_vp; 131774437Srwatson int attrnamespace; 131856272Srwatson const char *attrname; 131954803Srwatson{ 1320131734Salfred 1321101786Sphk if (filename_vp != NULL) 1322175294Sattilio VOP_UNLOCK(filename_vp, 0); 1323131734Salfred return (EOPNOTSUPP); 132454803Srwatson} 132554803Srwatson 1326131733Salfredint 1327131733Salfredvfs_stdsysctl(mp, op, req) 1328131733Salfred struct mount *mp; 1329131733Salfred fsctlop_t op; 1330131733Salfred struct sysctl_req *req; 1331131733Salfred{ 1332131733Salfred 1333131733Salfred return (EOPNOTSUPP); 1334131733Salfred} 1335131733Salfred 133651068Salfred/* end of vfs default ops */ 1337